1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "compiler/disassembler.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "interpreter/interp_masm.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/objArrayKlass.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "prims/methodHandles.hpp"
  37 #include "runtime/frame.inline.hpp"
  38 #include "runtime/safepointMechanism.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/synchronizer.hpp"
  42 #include "utilities/macros.hpp"
  43 
  44 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
  45 
  46 // Global Register Names
  47 static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
  48 static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
  49 
  50 // Platform-dependent initialization
  51 void TemplateTable::pd_initialize() {
  52   // No x86 specific initialization
  53 }
  54 
  55 // Address Computation: local variables
  56 static inline Address iaddress(int n) {
  57   return Address(rlocals, Interpreter::local_offset_in_bytes(n));
  58 }
  59 
  60 static inline Address laddress(int n) {
  61   return iaddress(n + 1);
  62 }
  63 
  64 #ifndef _LP64
  65 static inline Address haddress(int n) {
  66   return iaddress(n + 0);
  67 }
  68 #endif
  69 
  70 static inline Address faddress(int n) {
  71   return iaddress(n);
  72 }
  73 
  74 static inline Address daddress(int n) {
  75   return laddress(n);
  76 }
  77 
  78 static inline Address aaddress(int n) {
  79   return iaddress(n);
  80 }
  81 
  82 static inline Address iaddress(Register r) {
  83   return Address(rlocals, r, Address::times_ptr);
  84 }
  85 
  86 static inline Address laddress(Register r) {
  87   return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
  88 }
  89 
  90 #ifndef _LP64
  91 static inline Address haddress(Register r)       {
  92   return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
  93 }
  94 #endif
  95 
  96 static inline Address faddress(Register r) {
  97   return iaddress(r);
  98 }
  99 
 100 static inline Address daddress(Register r) {
 101   return laddress(r);
 102 }
 103 
 104 static inline Address aaddress(Register r) {
 105   return iaddress(r);
 106 }
 107 
 108 
 109 // expression stack
 110 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
 111 // data beyond the rsp which is potentially unsafe in an MT environment;
 112 // an interrupt may overwrite that data.)
 113 static inline Address at_rsp   () {
 114   return Address(rsp, 0);
 115 }
 116 
 117 // At top of Java expression stack which may be different than esp().  It
 118 // isn't for category 1 objects.
 119 static inline Address at_tos   () {
 120   return Address(rsp,  Interpreter::expr_offset_in_bytes(0));
 121 }
 122 
 123 static inline Address at_tos_p1() {
 124   return Address(rsp,  Interpreter::expr_offset_in_bytes(1));
 125 }
 126 
 127 static inline Address at_tos_p2() {
 128   return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
 129 }
 130 
 131 // Condition conversion
 132 static Assembler::Condition j_not(TemplateTable::Condition cc) {
 133   switch (cc) {
 134   case TemplateTable::equal        : return Assembler::notEqual;
 135   case TemplateTable::not_equal    : return Assembler::equal;
 136   case TemplateTable::less         : return Assembler::greaterEqual;
 137   case TemplateTable::less_equal   : return Assembler::greater;
 138   case TemplateTable::greater      : return Assembler::lessEqual;
 139   case TemplateTable::greater_equal: return Assembler::less;
 140   }
 141   ShouldNotReachHere();
 142   return Assembler::zero;
 143 }
 144 
 145 
 146 
 147 // Miscelaneous helper routines
 148 // Store an oop (or NULL) at the address described by obj.
 149 // If val == noreg this means store a NULL
 150 
 151 
 152 static void do_oop_store(InterpreterMacroAssembler* _masm,
 153                          Address dst,
 154                          Register val,
 155                          DecoratorSet decorators = 0) {
 156   assert(val == noreg || val == rax, "parameter is just for looks");
 157   __ store_heap_oop(dst, val, rdx, rbx, decorators);
 158 }
 159 
 160 static void do_oop_load(InterpreterMacroAssembler* _masm,
 161                         Address src,
 162                         Register dst,
 163                         DecoratorSet decorators = 0) {
 164   __ load_heap_oop(dst, src, rdx, rbx, decorators);
 165 }
 166 
 167 Address TemplateTable::at_bcp(int offset) {
 168   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 169   return Address(rbcp, offset);
 170 }
 171 
 172 
 173 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 174                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 175                                    int byte_no) {
 176   if (!RewriteBytecodes)  return;
 177   Label L_patch_done;
 178 
 179   switch (bc) {
 180   case Bytecodes::_fast_qputfield:
 181   case Bytecodes::_fast_aputfield:
 182   case Bytecodes::_fast_bputfield:
 183   case Bytecodes::_fast_zputfield:
 184   case Bytecodes::_fast_cputfield:
 185   case Bytecodes::_fast_dputfield:
 186   case Bytecodes::_fast_fputfield:
 187   case Bytecodes::_fast_iputfield:
 188   case Bytecodes::_fast_lputfield:
 189   case Bytecodes::_fast_sputfield:
 190     {
 191       // We skip bytecode quickening for putfield instructions when
 192       // the put_code written to the constant pool cache is zero.
 193       // This is required so that every execution of this instruction
 194       // calls out to InterpreterRuntime::resolve_get_put to do
 195       // additional, required work.
 196       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 197       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 198       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
 199       __ movl(bc_reg, bc);
 200       __ cmpl(temp_reg, (int) 0);
 201       __ jcc(Assembler::zero, L_patch_done);  // don't patch
 202     }
 203     break;
 204   default:
 205     assert(byte_no == -1, "sanity");
 206     // the pair bytecodes have already done the load.
 207     if (load_bc_into_bc_reg) {
 208       __ movl(bc_reg, bc);
 209     }
 210   }
 211 
 212   if (JvmtiExport::can_post_breakpoint()) {
 213     Label L_fast_patch;
 214     // if a breakpoint is present we can't rewrite the stream directly
 215     __ movzbl(temp_reg, at_bcp(0));
 216     __ cmpl(temp_reg, Bytecodes::_breakpoint);
 217     __ jcc(Assembler::notEqual, L_fast_patch);
 218     __ get_method(temp_reg);
 219     // Let breakpoint table handling rewrite to quicker bytecode
 220     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
 221 #ifndef ASSERT
 222     __ jmpb(L_patch_done);
 223 #else
 224     __ jmp(L_patch_done);
 225 #endif
 226     __ bind(L_fast_patch);
 227   }
 228 
 229 #ifdef ASSERT
 230   Label L_okay;
 231   __ load_unsigned_byte(temp_reg, at_bcp(0));
 232   __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
 233   __ jcc(Assembler::equal, L_okay);
 234   __ cmpl(temp_reg, bc_reg);
 235   __ jcc(Assembler::equal, L_okay);
 236   __ stop("patching the wrong bytecode");
 237   __ bind(L_okay);
 238 #endif
 239 
 240   // patch bytecode
 241   __ movb(at_bcp(0), bc_reg);
 242   __ bind(L_patch_done);
 243 }
 244 // Individual instructions
 245 
 246 
 247 void TemplateTable::nop() {
 248   transition(vtos, vtos);
 249   // nothing to do
 250 }
 251 
 252 void TemplateTable::shouldnotreachhere() {
 253   transition(vtos, vtos);
 254   __ stop("shouldnotreachhere bytecode");
 255 }
 256 
 257 void TemplateTable::aconst_null() {
 258   transition(vtos, atos);
 259   __ xorl(rax, rax);
 260 }
 261 
 262 void TemplateTable::iconst(int value) {
 263   transition(vtos, itos);
 264   if (value == 0) {
 265     __ xorl(rax, rax);
 266   } else {
 267     __ movl(rax, value);
 268   }
 269 }
 270 
 271 void TemplateTable::lconst(int value) {
 272   transition(vtos, ltos);
 273   if (value == 0) {
 274     __ xorl(rax, rax);
 275   } else {
 276     __ movl(rax, value);
 277   }
 278 #ifndef _LP64
 279   assert(value >= 0, "check this code");
 280   __ xorptr(rdx, rdx);
 281 #endif
 282 }
 283 
 284 
 285 
 286 void TemplateTable::fconst(int value) {
 287   transition(vtos, ftos);
 288   if (UseSSE >= 1) {
 289     static float one = 1.0f, two = 2.0f;
 290     switch (value) {
 291     case 0:
 292       __ xorps(xmm0, xmm0);
 293       break;
 294     case 1:
 295       __ movflt(xmm0, ExternalAddress((address) &one));
 296       break;
 297     case 2:
 298       __ movflt(xmm0, ExternalAddress((address) &two));
 299       break;
 300     default:
 301       ShouldNotReachHere();
 302       break;
 303     }
 304   } else {
 305 #ifdef _LP64
 306     ShouldNotReachHere();
 307 #else
 308            if (value == 0) { __ fldz();
 309     } else if (value == 1) { __ fld1();
 310     } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
 311     } else                 { ShouldNotReachHere();
 312     }
 313 #endif // _LP64
 314   }
 315 }
 316 
 317 void TemplateTable::dconst(int value) {
 318   transition(vtos, dtos);
 319   if (UseSSE >= 2) {
 320     static double one = 1.0;
 321     switch (value) {
 322     case 0:
 323       __ xorpd(xmm0, xmm0);
 324       break;
 325     case 1:
 326       __ movdbl(xmm0, ExternalAddress((address) &one));
 327       break;
 328     default:
 329       ShouldNotReachHere();
 330       break;
 331     }
 332   } else {
 333 #ifdef _LP64
 334     ShouldNotReachHere();
 335 #else
 336            if (value == 0) { __ fldz();
 337     } else if (value == 1) { __ fld1();
 338     } else                 { ShouldNotReachHere();
 339     }
 340 #endif
 341   }
 342 }
 343 
 344 void TemplateTable::bipush() {
 345   transition(vtos, itos);
 346   __ load_signed_byte(rax, at_bcp(1));
 347 }
 348 
 349 void TemplateTable::sipush() {
 350   transition(vtos, itos);
 351   __ load_unsigned_short(rax, at_bcp(1));
 352   __ bswapl(rax);
 353   __ sarl(rax, 16);
 354 }
 355 
 356 void TemplateTable::ldc(bool wide) {
 357   transition(vtos, vtos);
 358   Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 359   Label call_ldc, notFloat, notClass, notInt, Done;
 360 
 361   if (wide) {
 362     __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 363   } else {
 364     __ load_unsigned_byte(rbx, at_bcp(1));
 365   }
 366 
 367   __ get_cpool_and_tags(rcx, rax);
 368   const int base_offset = ConstantPool::header_size() * wordSize;
 369   const int tags_offset = Array<u1>::base_offset_in_bytes();
 370 
 371   // get type
 372   __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
 373 
 374   // unresolved class - get the resolved class
 375   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
 376   __ jccb(Assembler::equal, call_ldc);
 377 
 378   // unresolved class in error state - call into runtime to throw the error
 379   // from the first resolution attempt
 380   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
 381   __ jccb(Assembler::equal, call_ldc);
 382 
 383   // resolved class - need to call vm to get java mirror of the class
 384   __ cmpl(rdx, JVM_CONSTANT_Class);
 385   __ jcc(Assembler::notEqual, notClass);
 386 
 387   __ bind(call_ldc);
 388 
 389   __ movl(rarg, wide);
 390   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);
 391 
 392   __ push(atos);
 393   __ jmp(Done);
 394 
 395   __ bind(notClass);
 396   __ cmpl(rdx, JVM_CONSTANT_Float);
 397   __ jccb(Assembler::notEqual, notFloat);
 398 
 399   // ftos
 400   __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
 401   __ push(ftos);
 402   __ jmp(Done);
 403 
 404   __ bind(notFloat);
 405   __ cmpl(rdx, JVM_CONSTANT_Integer);
 406   __ jccb(Assembler::notEqual, notInt);
 407 
 408   // itos
 409   __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
 410   __ push(itos);
 411   __ jmp(Done);
 412 
 413   // assume the tag is for condy; if not, the VM runtime will tell us
 414   __ bind(notInt);
 415   condy_helper(Done);
 416 
 417   __ bind(Done);
 418 }
 419 
 420 // Fast path for caching oop constants.
 421 void TemplateTable::fast_aldc(bool wide) {
 422   transition(vtos, atos);
 423 
 424   Register result = rax;
 425   Register tmp = rdx;
 426   Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 427   int index_size = wide ? sizeof(u2) : sizeof(u1);
 428 
 429   Label resolved;
 430 
 431   // We are resolved if the resolved reference cache entry contains a
 432   // non-null object (String, MethodType, etc.)
 433   assert_different_registers(result, tmp);
 434   __ get_cache_index_at_bcp(tmp, 1, index_size);
 435   __ load_resolved_reference_at_index(result, tmp);
 436   __ testptr(result, result);
 437   __ jcc(Assembler::notZero, resolved);
 438 
 439   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 440 
 441   // first time invocation - must resolve first
 442   __ movl(rarg, (int)bytecode());
 443   __ call_VM(result, entry, rarg);
 444   __ bind(resolved);
 445 
 446   { // Check for the null sentinel.
 447     // If we just called the VM, it already did the mapping for us,
 448     // but it's harmless to retry.
 449     Label notNull;
 450     ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
 451     __ movptr(tmp, null_sentinel);
 452     __ cmpoop(tmp, result);
 453     __ jccb(Assembler::notEqual, notNull);
 454     __ xorptr(result, result);  // NULL object reference
 455     __ bind(notNull);
 456   }
 457 
 458   if (VerifyOops) {
 459     __ verify_oop(result);
 460   }
 461 }
 462 
 463 void TemplateTable::ldc2_w() {
 464   transition(vtos, vtos);
 465   Label notDouble, notLong, Done;
 466   __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 467 
 468   __ get_cpool_and_tags(rcx, rax);
 469   const int base_offset = ConstantPool::header_size() * wordSize;
 470   const int tags_offset = Array<u1>::base_offset_in_bytes();
 471 
 472   // get type
 473   __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
 474   __ cmpl(rdx, JVM_CONSTANT_Double);
 475   __ jccb(Assembler::notEqual, notDouble);
 476 
 477   // dtos
 478   __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
 479   __ push(dtos);
 480 
 481   __ jmp(Done);
 482   __ bind(notDouble);
 483   __ cmpl(rdx, JVM_CONSTANT_Long);
 484   __ jccb(Assembler::notEqual, notLong);
 485 
 486   // ltos
 487   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
 488   NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
 489   __ push(ltos);
 490   __ jmp(Done);
 491 
 492   __ bind(notLong);
 493   condy_helper(Done);
 494 
 495   __ bind(Done);
 496 }
 497 
 498 void TemplateTable::condy_helper(Label& Done) {
 499   const Register obj = rax;
 500   const Register off = rbx;
 501   const Register flags = rcx;
 502   const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 503   __ movl(rarg, (int)bytecode());
 504   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
 505 #ifndef _LP64
 506   // borrow rdi from locals
 507   __ get_thread(rdi);
 508   __ get_vm_result_2(flags, rdi);
 509   __ restore_locals();
 510 #else
 511   __ get_vm_result_2(flags, r15_thread);
 512 #endif
 513   // VMr = obj = base address to find primitive value to push
 514   // VMr2 = flags = (tos, off) using format of CPCE::_flags
 515   __ movl(off, flags);
 516   __ andl(off, ConstantPoolCacheEntry::field_index_mask);
 517   const Address field(obj, off, Address::times_1, 0*wordSize);
 518 
 519   // What sort of thing are we loading?
 520   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
 521   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
 522 
 523   switch (bytecode()) {
 524   case Bytecodes::_ldc:
 525   case Bytecodes::_ldc_w:
 526     {
 527       // tos in (itos, ftos, stos, btos, ctos, ztos)
 528       Label notInt, notFloat, notShort, notByte, notChar, notBool;
 529       __ cmpl(flags, itos);
 530       __ jcc(Assembler::notEqual, notInt);
 531       // itos
 532       __ movl(rax, field);
 533       __ push(itos);
 534       __ jmp(Done);
 535 
 536       __ bind(notInt);
 537       __ cmpl(flags, ftos);
 538       __ jcc(Assembler::notEqual, notFloat);
 539       // ftos
 540       __ load_float(field);
 541       __ push(ftos);
 542       __ jmp(Done);
 543 
 544       __ bind(notFloat);
 545       __ cmpl(flags, stos);
 546       __ jcc(Assembler::notEqual, notShort);
 547       // stos
 548       __ load_signed_short(rax, field);
 549       __ push(stos);
 550       __ jmp(Done);
 551 
 552       __ bind(notShort);
 553       __ cmpl(flags, btos);
 554       __ jcc(Assembler::notEqual, notByte);
 555       // btos
 556       __ load_signed_byte(rax, field);
 557       __ push(btos);
 558       __ jmp(Done);
 559 
 560       __ bind(notByte);
 561       __ cmpl(flags, ctos);
 562       __ jcc(Assembler::notEqual, notChar);
 563       // ctos
 564       __ load_unsigned_short(rax, field);
 565       __ push(ctos);
 566       __ jmp(Done);
 567 
 568       __ bind(notChar);
 569       __ cmpl(flags, ztos);
 570       __ jcc(Assembler::notEqual, notBool);
 571       // ztos
 572       __ load_signed_byte(rax, field);
 573       __ push(ztos);
 574       __ jmp(Done);
 575 
 576       __ bind(notBool);
 577       break;
 578     }
 579 
 580   case Bytecodes::_ldc2_w:
 581     {
 582       Label notLong, notDouble;
 583       __ cmpl(flags, ltos);
 584       __ jcc(Assembler::notEqual, notLong);
 585       // ltos
 586       // Loading high word first because movptr clobbers rax
 587       NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
 588       __ movptr(rax, field);
 589       __ push(ltos);
 590       __ jmp(Done);
 591 
 592       __ bind(notLong);
 593       __ cmpl(flags, dtos);
 594       __ jcc(Assembler::notEqual, notDouble);
 595       // dtos
 596       __ load_double(field);
 597       __ push(dtos);
 598       __ jmp(Done);
 599 
 600       __ bind(notDouble);
 601       break;
 602     }
 603 
 604   default:
 605     ShouldNotReachHere();
 606   }
 607 
 608   __ stop("bad ldc/condy");
 609 }
 610 
 611 void TemplateTable::locals_index(Register reg, int offset) {
 612   __ load_unsigned_byte(reg, at_bcp(offset));
 613   __ negptr(reg);
 614 }
 615 
 616 void TemplateTable::iload() {
 617   iload_internal();
 618 }
 619 
 620 void TemplateTable::nofast_iload() {
 621   iload_internal(may_not_rewrite);
 622 }
 623 
 624 void TemplateTable::iload_internal(RewriteControl rc) {
 625   transition(vtos, itos);
 626   if (RewriteFrequentPairs && rc == may_rewrite) {
 627     Label rewrite, done;
 628     const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
 629     LP64_ONLY(assert(rbx != bc, "register damaged"));
 630 
 631     // get next byte
 632     __ load_unsigned_byte(rbx,
 633                           at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 634     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
 635     // last two iloads in a pair.  Comparing against fast_iload means that
 636     // the next bytecode is neither an iload or a caload, and therefore
 637     // an iload pair.
 638     __ cmpl(rbx, Bytecodes::_iload);
 639     __ jcc(Assembler::equal, done);
 640 
 641     __ cmpl(rbx, Bytecodes::_fast_iload);
 642     __ movl(bc, Bytecodes::_fast_iload2);
 643 
 644     __ jccb(Assembler::equal, rewrite);
 645 
 646     // if _caload, rewrite to fast_icaload
 647     __ cmpl(rbx, Bytecodes::_caload);
 648     __ movl(bc, Bytecodes::_fast_icaload);
 649     __ jccb(Assembler::equal, rewrite);
 650 
 651     // rewrite so iload doesn't check again.
 652     __ movl(bc, Bytecodes::_fast_iload);
 653 
 654     // rewrite
 655     // bc: fast bytecode
 656     __ bind(rewrite);
 657     patch_bytecode(Bytecodes::_iload, bc, rbx, false);
 658     __ bind(done);
 659   }
 660 
 661   // Get the local value into tos
 662   locals_index(rbx);
 663   __ movl(rax, iaddress(rbx));
 664 }
 665 
 666 void TemplateTable::fast_iload2() {
 667   transition(vtos, itos);
 668   locals_index(rbx);
 669   __ movl(rax, iaddress(rbx));
 670   __ push(itos);
 671   locals_index(rbx, 3);
 672   __ movl(rax, iaddress(rbx));
 673 }
 674 
 675 void TemplateTable::fast_iload() {
 676   transition(vtos, itos);
 677   locals_index(rbx);
 678   __ movl(rax, iaddress(rbx));
 679 }
 680 
 681 void TemplateTable::lload() {
 682   transition(vtos, ltos);
 683   locals_index(rbx);
 684   __ movptr(rax, laddress(rbx));
 685   NOT_LP64(__ movl(rdx, haddress(rbx)));
 686 }
 687 
 688 void TemplateTable::fload() {
 689   transition(vtos, ftos);
 690   locals_index(rbx);
 691   __ load_float(faddress(rbx));
 692 }
 693 
 694 void TemplateTable::dload() {
 695   transition(vtos, dtos);
 696   locals_index(rbx);
 697   __ load_double(daddress(rbx));
 698 }
 699 
 700 void TemplateTable::aload() {
 701   transition(vtos, atos);
 702   locals_index(rbx);
 703   __ movptr(rax, aaddress(rbx));
 704 }
 705 
 706 void TemplateTable::locals_index_wide(Register reg) {
 707   __ load_unsigned_short(reg, at_bcp(2));
 708   __ bswapl(reg);
 709   __ shrl(reg, 16);
 710   __ negptr(reg);
 711 }
 712 
 713 void TemplateTable::wide_iload() {
 714   transition(vtos, itos);
 715   locals_index_wide(rbx);
 716   __ movl(rax, iaddress(rbx));
 717 }
 718 
 719 void TemplateTable::wide_lload() {
 720   transition(vtos, ltos);
 721   locals_index_wide(rbx);
 722   __ movptr(rax, laddress(rbx));
 723   NOT_LP64(__ movl(rdx, haddress(rbx)));
 724 }
 725 
 726 void TemplateTable::wide_fload() {
 727   transition(vtos, ftos);
 728   locals_index_wide(rbx);
 729   __ load_float(faddress(rbx));
 730 }
 731 
 732 void TemplateTable::wide_dload() {
 733   transition(vtos, dtos);
 734   locals_index_wide(rbx);
 735   __ load_double(daddress(rbx));
 736 }
 737 
 738 void TemplateTable::wide_aload() {
 739   transition(vtos, atos);
 740   locals_index_wide(rbx);
 741   __ movptr(rax, aaddress(rbx));
 742 }
 743 
 744 void TemplateTable::index_check(Register array, Register index) {
 745   // Pop ptr into array
 746   __ pop_ptr(array);
 747   index_check_without_pop(array, index);
 748 }
 749 
 750 void TemplateTable::index_check_without_pop(Register array, Register index) {
 751   // destroys rbx
 752   // check array
 753   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
 754   // sign extend index for use by indexed load
 755   __ movl2ptr(index, index);
 756   // check index
 757   __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
 758   if (index != rbx) {
 759     // ??? convention: move aberrant index into rbx for exception message
 760     assert(rbx != array, "different registers");
 761     __ movl(rbx, index);
 762   }
 763   Label skip;
 764   __ jccb(Assembler::below, skip);
 765   // Pass array to create more detailed exceptions.
 766   __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
 767   __ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
 768   __ bind(skip);
 769 }
 770 
 771 void TemplateTable::iaload() {
 772   transition(itos, itos);
 773   // rax: index
 774   // rdx: array
 775   index_check(rdx, rax); // kills rbx
 776   __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
 777                     Address(rdx, rax, Address::times_4,
 778                             arrayOopDesc::base_offset_in_bytes(T_INT)),
 779                     noreg, noreg);
 780 }
 781 
 782 void TemplateTable::laload() {
 783   transition(itos, ltos);
 784   // rax: index
 785   // rdx: array
 786   index_check(rdx, rax); // kills rbx
 787   NOT_LP64(__ mov(rbx, rax));
 788   // rbx,: index
 789   __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
 790                     Address(rdx, rbx, Address::times_8,
 791                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
 792                     noreg, noreg);
 793 }
 794 
 795 
 796 
 797 void TemplateTable::faload() {
 798   transition(itos, ftos);
 799   // rax: index
 800   // rdx: array
 801   index_check(rdx, rax); // kills rbx
 802   __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
 803                     Address(rdx, rax,
 804                             Address::times_4,
 805                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
 806                     noreg, noreg);
 807 }
 808 
 809 void TemplateTable::daload() {
 810   transition(itos, dtos);
 811   // rax: index
 812   // rdx: array
 813   index_check(rdx, rax); // kills rbx
 814   __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
 815                     Address(rdx, rax,
 816                             Address::times_8,
 817                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
 818                     noreg, noreg);
 819 }
 820 
 821 void TemplateTable::aaload() {
 822   transition(itos, atos);
 823 
 824   Register array = rcx;
 825   Register index = rax;
 826 
 827   index_check(array, index); // kills rbx
 828   if (ValueArrayFlatten) {
 829     Label is_flat_array, done;
 830     __ test_flat_array_oop(array, rbx, is_flat_array);
 831     do_oop_load(_masm,
 832                 Address(array, index,
 833                         UseCompressedOops ? Address::times_4 : Address::times_ptr,
 834                         arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
 835                 rax,
 836                 IS_ARRAY);
 837     __ jmp(done);
 838     __ bind(is_flat_array);
 839     __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), array, index);
 840     __ bind(done);
 841   } else {
 842     do_oop_load(_masm,
 843                 Address(array, index,
 844                         UseCompressedOops ? Address::times_4 : Address::times_ptr,
 845                         arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
 846                 rax,
 847                 IS_ARRAY);
 848   }
 849 }
 850 
 851 void TemplateTable::baload() {
 852   transition(itos, itos);
 853   // rax: index
 854   // rdx: array
 855   index_check(rdx, rax); // kills rbx
 856   __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
 857                     Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
 858                     noreg, noreg);
 859 }
 860 
 861 void TemplateTable::caload() {
 862   transition(itos, itos);
 863   // rax: index
 864   // rdx: array
 865   index_check(rdx, rax); // kills rbx
 866   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
 867                     Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
 868                     noreg, noreg);
 869 }
 870 
 871 // iload followed by caload frequent pair
 872 void TemplateTable::fast_icaload() {
 873   transition(vtos, itos);
 874   // load index out of locals
 875   locals_index(rbx);
 876   __ movl(rax, iaddress(rbx));
 877 
 878   // rax: index
 879   // rdx: array
 880   index_check(rdx, rax); // kills rbx
 881   __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
 882                     Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
 883                     noreg, noreg);
 884 }
 885 
 886 
 887 void TemplateTable::saload() {
 888   transition(itos, itos);
 889   // rax: index
 890   // rdx: array
 891   index_check(rdx, rax); // kills rbx
 892   __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
 893                     Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
 894                     noreg, noreg);
 895 }
 896 
 897 void TemplateTable::iload(int n) {
 898   transition(vtos, itos);
 899   __ movl(rax, iaddress(n));
 900 }
 901 
 902 void TemplateTable::lload(int n) {
 903   transition(vtos, ltos);
 904   __ movptr(rax, laddress(n));
 905   NOT_LP64(__ movptr(rdx, haddress(n)));
 906 }
 907 
 908 void TemplateTable::fload(int n) {
 909   transition(vtos, ftos);
 910   __ load_float(faddress(n));
 911 }
 912 
 913 void TemplateTable::dload(int n) {
 914   transition(vtos, dtos);
 915   __ load_double(daddress(n));
 916 }
 917 
 918 void TemplateTable::aload(int n) {
 919   transition(vtos, atos);
 920   __ movptr(rax, aaddress(n));
 921 }
 922 
 923 void TemplateTable::aload_0() {
 924   aload_0_internal();
 925 }
 926 
 927 void TemplateTable::nofast_aload_0() {
 928   aload_0_internal(may_not_rewrite);
 929 }
 930 
 931 void TemplateTable::aload_0_internal(RewriteControl rc) {
 932   transition(vtos, atos);
 933   // According to bytecode histograms, the pairs:
 934   //
 935   // _aload_0, _fast_igetfield
 936   // _aload_0, _fast_agetfield
 937   // _aload_0, _fast_fgetfield
 938   //
 939   // occur frequently. If RewriteFrequentPairs is set, the (slow)
 940   // _aload_0 bytecode checks if the next bytecode is either
 941   // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
 942   // rewrites the current bytecode into a pair bytecode; otherwise it
 943   // rewrites the current bytecode into _fast_aload_0 that doesn't do
 944   // the pair check anymore.
 945   //
 946   // Note: If the next bytecode is _getfield, the rewrite must be
 947   //       delayed, otherwise we may miss an opportunity for a pair.
 948   //
 949   // Also rewrite frequent pairs
 950   //   aload_0, aload_1
 951   //   aload_0, iload_1
 952   // These bytecodes with a small amount of code are most profitable
 953   // to rewrite
 954   if (RewriteFrequentPairs && rc == may_rewrite) {
 955     Label rewrite, done;
 956 
 957     const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
 958     LP64_ONLY(assert(rbx != bc, "register damaged"));
 959 
 960     // get next byte
 961     __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 962 
 963     // if _getfield then wait with rewrite
 964     __ cmpl(rbx, Bytecodes::_getfield);
 965     __ jcc(Assembler::equal, done);
 966 
 967     // if _igetfield then rewrite to _fast_iaccess_0
 968     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 969     __ cmpl(rbx, Bytecodes::_fast_igetfield);
 970     __ movl(bc, Bytecodes::_fast_iaccess_0);
 971     __ jccb(Assembler::equal, rewrite);
 972 
 973     // if _agetfield then rewrite to _fast_aaccess_0
 974     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 975     __ cmpl(rbx, Bytecodes::_fast_agetfield);
 976     __ movl(bc, Bytecodes::_fast_aaccess_0);
 977     __ jccb(Assembler::equal, rewrite);
 978 
 979     // if _fgetfield then rewrite to _fast_faccess_0
 980     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 981     __ cmpl(rbx, Bytecodes::_fast_fgetfield);
 982     __ movl(bc, Bytecodes::_fast_faccess_0);
 983     __ jccb(Assembler::equal, rewrite);
 984 
 985     // else rewrite to _fast_aload0
 986     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
 987     __ movl(bc, Bytecodes::_fast_aload_0);
 988 
 989     // rewrite
 990     // bc: fast bytecode
 991     __ bind(rewrite);
 992     patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
 993 
 994     __ bind(done);
 995   }
 996 
 997   // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
 998   aload(0);
 999 }
1000 
1001 void TemplateTable::istore() {
1002   transition(itos, vtos);
1003   locals_index(rbx);
1004   __ movl(iaddress(rbx), rax);
1005 }
1006 
1007 
1008 void TemplateTable::lstore() {
1009   transition(ltos, vtos);
1010   locals_index(rbx);
1011   __ movptr(laddress(rbx), rax);
1012   NOT_LP64(__ movptr(haddress(rbx), rdx));
1013 }
1014 
1015 void TemplateTable::fstore() {
1016   transition(ftos, vtos);
1017   locals_index(rbx);
1018   __ store_float(faddress(rbx));
1019 }
1020 
1021 void TemplateTable::dstore() {
1022   transition(dtos, vtos);
1023   locals_index(rbx);
1024   __ store_double(daddress(rbx));
1025 }
1026 
1027 void TemplateTable::astore() {
1028   transition(vtos, vtos);
1029   __ pop_ptr(rax);
1030   locals_index(rbx);
1031   __ movptr(aaddress(rbx), rax);
1032 }
1033 
1034 void TemplateTable::wide_istore() {
1035   transition(vtos, vtos);
1036   __ pop_i();
1037   locals_index_wide(rbx);
1038   __ movl(iaddress(rbx), rax);
1039 }
1040 
1041 void TemplateTable::wide_lstore() {
1042   transition(vtos, vtos);
1043   NOT_LP64(__ pop_l(rax, rdx));
1044   LP64_ONLY(__ pop_l());
1045   locals_index_wide(rbx);
1046   __ movptr(laddress(rbx), rax);
1047   NOT_LP64(__ movl(haddress(rbx), rdx));
1048 }
1049 
1050 void TemplateTable::wide_fstore() {
1051 #ifdef _LP64
1052   transition(vtos, vtos);
1053   __ pop_f(xmm0);
1054   locals_index_wide(rbx);
1055   __ movflt(faddress(rbx), xmm0);
1056 #else
1057   wide_istore();
1058 #endif
1059 }
1060 
1061 void TemplateTable::wide_dstore() {
1062 #ifdef _LP64
1063   transition(vtos, vtos);
1064   __ pop_d(xmm0);
1065   locals_index_wide(rbx);
1066   __ movdbl(daddress(rbx), xmm0);
1067 #else
1068   wide_lstore();
1069 #endif
1070 }
1071 
1072 void TemplateTable::wide_astore() {
1073   transition(vtos, vtos);
1074   __ pop_ptr(rax);
1075   locals_index_wide(rbx);
1076   __ movptr(aaddress(rbx), rax);
1077 }
1078 
1079 void TemplateTable::iastore() {
1080   transition(itos, vtos);
1081   __ pop_i(rbx);
1082   // rax: value
1083   // rbx: index
1084   // rdx: array
1085   index_check(rdx, rbx); // prefer index in rbx
1086   __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
1087                      Address(rdx, rbx, Address::times_4,
1088                              arrayOopDesc::base_offset_in_bytes(T_INT)),
1089                      rax, noreg, noreg);
1090 }
1091 
1092 void TemplateTable::lastore() {
1093   transition(ltos, vtos);
1094   __ pop_i(rbx);
1095   // rax,: low(value)
1096   // rcx: array
1097   // rdx: high(value)
1098   index_check(rcx, rbx);  // prefer index in rbx,
1099   // rbx,: index
1100   __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
1101                      Address(rcx, rbx, Address::times_8,
1102                              arrayOopDesc::base_offset_in_bytes(T_LONG)),
1103                      noreg /* ltos */, noreg, noreg);
1104 }
1105 
1106 
1107 void TemplateTable::fastore() {
1108   transition(ftos, vtos);
1109   __ pop_i(rbx);
1110   // value is in UseSSE >= 1 ? xmm0 : ST(0)
1111   // rbx:  index
1112   // rdx:  array
1113   index_check(rdx, rbx); // prefer index in rbx
1114   __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
1115                      Address(rdx, rbx, Address::times_4,
1116                              arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
1117                      noreg /* ftos */, noreg, noreg);
1118 }
1119 
1120 void TemplateTable::dastore() {
1121   transition(dtos, vtos);
1122   __ pop_i(rbx);
1123   // value is in UseSSE >= 2 ? xmm0 : ST(0)
1124   // rbx:  index
1125   // rdx:  array
1126   index_check(rdx, rbx); // prefer index in rbx
1127   __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
1128                      Address(rdx, rbx, Address::times_8,
1129                              arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
1130                      noreg /* dtos */, noreg, noreg);
1131 }
1132 
1133 void TemplateTable::aastore() {
1134   Label is_null, is_flat_array, ok_is_subtype, done;
1135   transition(vtos, vtos);
1136   // stack: ..., array, index, value
1137   __ movptr(rax, at_tos());    // value
1138   __ movl(rcx, at_tos_p1()); // index
1139   __ movptr(rdx, at_tos_p2()); // array
1140 
1141   Address element_address(rdx, rcx,
1142                           UseCompressedOops? Address::times_4 : Address::times_ptr,
1143                           arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1144 
1145   index_check_without_pop(rdx, rcx);     // kills rbx
1146 
1147   __ testptr(rax, rax);
1148   __ jcc(Assembler::zero, is_null);
1149 
1150   // Move array class to rdi
1151   __ load_klass(rdi, rdx);
1152   if (ValueArrayFlatten) {
1153     __ test_flat_array_klass(rdi, rbx, is_flat_array);
1154   }
1155 
1156   // Move subklass into rbx
1157   __ load_klass(rbx, rax);
1158   // Move array element superklass into rax
1159   __ movptr(rax, Address(rdi,
1160                          ObjArrayKlass::element_klass_offset()));
1161 
1162   // Generate subtype check.  Blows rcx, rdi
1163   // Superklass in rax.  Subklass in rbx.
1164   // is "rbx <: rax" ? (value subclass <: array element superclass)
1165   __ gen_subtype_check(rbx, ok_is_subtype);
1166 
1167   // Come here on failure
1168   // object is at TOS
1169   __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
1170 
1171   // Come here on success
1172   __ bind(ok_is_subtype);
1173 
1174   // Get the value we will store
1175   __ movptr(rax, at_tos());
1176   __ movl(rcx, at_tos_p1()); // index
1177   // Now store using the appropriate barrier
1178   do_oop_store(_masm, element_address, rax, IS_ARRAY);
1179   __ jmp(done);
1180 
1181   // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
1182   __ bind(is_null);
1183   __ profile_null_seen(rbx);
1184   if (EnableValhalla) {
1185     Label is_null_into_value_array_npe, store_null;
1186 
1187     __ load_klass(rdi, rdx);
1188     // No way to store null in flat array
1189     __ test_flat_array_klass(rdi, rbx, is_null_into_value_array_npe);
1190 
1191     // Use case for storing values in objArray where element_klass is specifically
1192     // a value type because they could not be flattened "for reasons",
1193     // these need to have the same semantics as flat arrays, i.e. NPE
1194     __ movptr(rdi, Address(rdi, ObjArrayKlass::element_klass_offset()));
1195     __ test_klass_is_value(rdi, rdi, is_null_into_value_array_npe);
1196     __ jmp(store_null);
1197 
1198     __ bind(is_null_into_value_array_npe);
1199     __ jump(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
1200 
1201     __ bind(store_null);
1202   }
1203   // Store a NULL
1204   do_oop_store(_masm, element_address, noreg, IS_ARRAY);
1205   __ jmp(done);
1206 
1207   if (EnableValhalla) {
1208     Label is_type_ok;
1209     __ bind(is_flat_array); // Store non-null value to flat
1210 
1211     // Simplistic type check...
1212 
1213     // Profile the not-null value's klass.
1214     __ load_klass(rbx, rax);
1215     __ profile_typecheck(rcx, rbx, rax); // blows rcx, and rax
1216     // Move element klass into rax
1217     __ movptr(rax, Address(rdi, ArrayKlass::element_klass_offset()));
1218     // flat value array needs exact type match
1219     // is "rax == rbx" (value subclass == array element superclass)
1220     __ cmpptr(rax, rbx);
1221     __ jccb(Assembler::equal, is_type_ok);
1222 
1223     __ profile_typecheck_failed(rcx);
1224     __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
1225 
1226     __ bind(is_type_ok);
1227     __ movptr(rax, at_tos());  // value
1228     __ movl(rcx, at_tos_p1()); // index
1229     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_store), rax, rdx, rcx);
1230   }
1231   // Pop stack arguments
1232   __ bind(done);
1233   __ addptr(rsp, 3 * Interpreter::stackElementSize);
1234 }
1235 
1236 void TemplateTable::bastore() {
1237   transition(itos, vtos);
1238   __ pop_i(rbx);
1239   // rax: value
1240   // rbx: index
1241   // rdx: array
1242   index_check(rdx, rbx); // prefer index in rbx
1243   // Need to check whether array is boolean or byte
1244   // since both types share the bastore bytecode.
1245   __ load_klass(rcx, rdx);
1246   __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
1247   int diffbit = Klass::layout_helper_boolean_diffbit();
1248   __ testl(rcx, diffbit);
1249   Label L_skip;
1250   __ jccb(Assembler::zero, L_skip);
1251   __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
1252   __ bind(L_skip);
1253   __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
1254                      Address(rdx, rbx,Address::times_1,
1255                              arrayOopDesc::base_offset_in_bytes(T_BYTE)),
1256                      rax, noreg, noreg);
1257 }
1258 
1259 void TemplateTable::castore() {
1260   transition(itos, vtos);
1261   __ pop_i(rbx);
1262   // rax: value
1263   // rbx: index
1264   // rdx: array
1265   index_check(rdx, rbx);  // prefer index in rbx
1266   __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
1267                      Address(rdx, rbx, Address::times_2,
1268                              arrayOopDesc::base_offset_in_bytes(T_CHAR)),
1269                      rax, noreg, noreg);
1270 }
1271 
1272 
1273 void TemplateTable::sastore() {
1274   castore();
1275 }
1276 
1277 void TemplateTable::istore(int n) {
1278   transition(itos, vtos);
1279   __ movl(iaddress(n), rax);
1280 }
1281 
1282 void TemplateTable::lstore(int n) {
1283   transition(ltos, vtos);
1284   __ movptr(laddress(n), rax);
1285   NOT_LP64(__ movptr(haddress(n), rdx));
1286 }
1287 
1288 void TemplateTable::fstore(int n) {
1289   transition(ftos, vtos);
1290   __ store_float(faddress(n));
1291 }
1292 
1293 void TemplateTable::dstore(int n) {
1294   transition(dtos, vtos);
1295   __ store_double(daddress(n));
1296 }
1297 
1298 
1299 void TemplateTable::astore(int n) {
1300   transition(vtos, vtos);
1301   __ pop_ptr(rax);
1302   __ movptr(aaddress(n), rax);
1303 }
1304 
1305 void TemplateTable::pop() {
1306   transition(vtos, vtos);
1307   __ addptr(rsp, Interpreter::stackElementSize);
1308 }
1309 
1310 void TemplateTable::pop2() {
1311   transition(vtos, vtos);
1312   __ addptr(rsp, 2 * Interpreter::stackElementSize);
1313 }
1314 
1315 
1316 void TemplateTable::dup() {
1317   transition(vtos, vtos);
1318   __ load_ptr(0, rax);
1319   __ push_ptr(rax);
1320   // stack: ..., a, a
1321 }
1322 
1323 void TemplateTable::dup_x1() {
1324   transition(vtos, vtos);
1325   // stack: ..., a, b
1326   __ load_ptr( 0, rax);  // load b
1327   __ load_ptr( 1, rcx);  // load a
1328   __ store_ptr(1, rax);  // store b
1329   __ store_ptr(0, rcx);  // store a
1330   __ push_ptr(rax);      // push b
1331   // stack: ..., b, a, b
1332 }
1333 
1334 void TemplateTable::dup_x2() {
1335   transition(vtos, vtos);
1336   // stack: ..., a, b, c
1337   __ load_ptr( 0, rax);  // load c
1338   __ load_ptr( 2, rcx);  // load a
1339   __ store_ptr(2, rax);  // store c in a
1340   __ push_ptr(rax);      // push c
1341   // stack: ..., c, b, c, c
1342   __ load_ptr( 2, rax);  // load b
1343   __ store_ptr(2, rcx);  // store a in b
1344   // stack: ..., c, a, c, c
1345   __ store_ptr(1, rax);  // store b in c
1346   // stack: ..., c, a, b, c
1347 }
1348 
1349 void TemplateTable::dup2() {
1350   transition(vtos, vtos);
1351   // stack: ..., a, b
1352   __ load_ptr(1, rax);  // load a
1353   __ push_ptr(rax);     // push a
1354   __ load_ptr(1, rax);  // load b
1355   __ push_ptr(rax);     // push b
1356   // stack: ..., a, b, a, b
1357 }
1358 
1359 
1360 void TemplateTable::dup2_x1() {
1361   transition(vtos, vtos);
1362   // stack: ..., a, b, c
1363   __ load_ptr( 0, rcx);  // load c
1364   __ load_ptr( 1, rax);  // load b
1365   __ push_ptr(rax);      // push b
1366   __ push_ptr(rcx);      // push c
1367   // stack: ..., a, b, c, b, c
1368   __ store_ptr(3, rcx);  // store c in b
1369   // stack: ..., a, c, c, b, c
1370   __ load_ptr( 4, rcx);  // load a
1371   __ store_ptr(2, rcx);  // store a in 2nd c
1372   // stack: ..., a, c, a, b, c
1373   __ store_ptr(4, rax);  // store b in a
1374   // stack: ..., b, c, a, b, c
1375 }
1376 
1377 void TemplateTable::dup2_x2() {
1378   transition(vtos, vtos);
1379   // stack: ..., a, b, c, d
1380   __ load_ptr( 0, rcx);  // load d
1381   __ load_ptr( 1, rax);  // load c
1382   __ push_ptr(rax);      // push c
1383   __ push_ptr(rcx);      // push d
1384   // stack: ..., a, b, c, d, c, d
1385   __ load_ptr( 4, rax);  // load b
1386   __ store_ptr(2, rax);  // store b in d
1387   __ store_ptr(4, rcx);  // store d in b
1388   // stack: ..., a, d, c, b, c, d
1389   __ load_ptr( 5, rcx);  // load a
1390   __ load_ptr( 3, rax);  // load c
1391   __ store_ptr(3, rcx);  // store a in c
1392   __ store_ptr(5, rax);  // store c in a
1393   // stack: ..., c, d, a, b, c, d
1394 }
1395 
1396 void TemplateTable::swap() {
1397   transition(vtos, vtos);
1398   // stack: ..., a, b
1399   __ load_ptr( 1, rcx);  // load a
1400   __ load_ptr( 0, rax);  // load b
1401   __ store_ptr(0, rcx);  // store a in b
1402   __ store_ptr(1, rax);  // store b in a
1403   // stack: ..., b, a
1404 }
1405 
1406 void TemplateTable::iop2(Operation op) {
1407   transition(itos, itos);
1408   switch (op) {
1409   case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
1410   case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1411   case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
1412   case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
1413   case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
1414   case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
1415   case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
1416   case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
1417   case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
1418   default   : ShouldNotReachHere();
1419   }
1420 }
1421 
1422 void TemplateTable::lop2(Operation op) {
1423   transition(ltos, ltos);
1424 #ifdef _LP64
1425   switch (op) {
1426   case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
1427   case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
1428   case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
1429   case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
1430   case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
1431   default   : ShouldNotReachHere();
1432   }
1433 #else
1434   __ pop_l(rbx, rcx);
1435   switch (op) {
1436     case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1437     case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1438                 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1439     case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1440     case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1441     case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1442     default   : ShouldNotReachHere();
1443   }
1444 #endif
1445 }
1446 
1447 void TemplateTable::idiv() {
1448   transition(itos, itos);
1449   __ movl(rcx, rax);
1450   __ pop_i(rax);
1451   // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1452   //       they are not equal, one could do a normal division (no correction
1453   //       needed), which may speed up this implementation for the common case.
1454   //       (see also JVM spec., p.243 & p.271)
1455   __ corrected_idivl(rcx);
1456 }
1457 
1458 void TemplateTable::irem() {
1459   transition(itos, itos);
1460   __ movl(rcx, rax);
1461   __ pop_i(rax);
1462   // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1463   //       they are not equal, one could do a normal division (no correction
1464   //       needed), which may speed up this implementation for the common case.
1465   //       (see also JVM spec., p.243 & p.271)
1466   __ corrected_idivl(rcx);
1467   __ movl(rax, rdx);
1468 }
1469 
1470 void TemplateTable::lmul() {
1471   transition(ltos, ltos);
1472 #ifdef _LP64
1473   __ pop_l(rdx);
1474   __ imulq(rax, rdx);
1475 #else
1476   __ pop_l(rbx, rcx);
1477   __ push(rcx); __ push(rbx);
1478   __ push(rdx); __ push(rax);
1479   __ lmul(2 * wordSize, 0);
1480   __ addptr(rsp, 4 * wordSize);  // take off temporaries
1481 #endif
1482 }
1483 
1484 void TemplateTable::ldiv() {
1485   transition(ltos, ltos);
1486 #ifdef _LP64
1487   __ mov(rcx, rax);
1488   __ pop_l(rax);
1489   // generate explicit div0 check
1490   __ testq(rcx, rcx);
1491   __ jump_cc(Assembler::zero,
1492              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1493   // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1494   //       they are not equal, one could do a normal division (no correction
1495   //       needed), which may speed up this implementation for the common case.
1496   //       (see also JVM spec., p.243 & p.271)
1497   __ corrected_idivq(rcx); // kills rbx
1498 #else
1499   __ pop_l(rbx, rcx);
1500   __ push(rcx); __ push(rbx);
1501   __ push(rdx); __ push(rax);
1502   // check if y = 0
1503   __ orl(rax, rdx);
1504   __ jump_cc(Assembler::zero,
1505              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1506   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1507   __ addptr(rsp, 4 * wordSize);  // take off temporaries
1508 #endif
1509 }
1510 
1511 void TemplateTable::lrem() {
1512   transition(ltos, ltos);
1513 #ifdef _LP64
1514   __ mov(rcx, rax);
1515   __ pop_l(rax);
1516   __ testq(rcx, rcx);
1517   __ jump_cc(Assembler::zero,
1518              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1519   // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1520   //       they are not equal, one could do a normal division (no correction
1521   //       needed), which may speed up this implementation for the common case.
1522   //       (see also JVM spec., p.243 & p.271)
1523   __ corrected_idivq(rcx); // kills rbx
1524   __ mov(rax, rdx);
1525 #else
1526   __ pop_l(rbx, rcx);
1527   __ push(rcx); __ push(rbx);
1528   __ push(rdx); __ push(rax);
1529   // check if y = 0
1530   __ orl(rax, rdx);
1531   __ jump_cc(Assembler::zero,
1532              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1533   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1534   __ addptr(rsp, 4 * wordSize);
1535 #endif
1536 }
1537 
1538 void TemplateTable::lshl() {
1539   transition(itos, ltos);
1540   __ movl(rcx, rax);                             // get shift count
1541   #ifdef _LP64
1542   __ pop_l(rax);                                 // get shift value
1543   __ shlq(rax);
1544 #else
1545   __ pop_l(rax, rdx);                            // get shift value
1546   __ lshl(rdx, rax);
1547 #endif
1548 }
1549 
1550 void TemplateTable::lshr() {
1551 #ifdef _LP64
1552   transition(itos, ltos);
1553   __ movl(rcx, rax);                             // get shift count
1554   __ pop_l(rax);                                 // get shift value
1555   __ sarq(rax);
1556 #else
1557   transition(itos, ltos);
1558   __ mov(rcx, rax);                              // get shift count
1559   __ pop_l(rax, rdx);                            // get shift value
1560   __ lshr(rdx, rax, true);
1561 #endif
1562 }
1563 
1564 void TemplateTable::lushr() {
1565   transition(itos, ltos);
1566 #ifdef _LP64
1567   __ movl(rcx, rax);                             // get shift count
1568   __ pop_l(rax);                                 // get shift value
1569   __ shrq(rax);
1570 #else
1571   __ mov(rcx, rax);                              // get shift count
1572   __ pop_l(rax, rdx);                            // get shift value
1573   __ lshr(rdx, rax);
1574 #endif
1575 }
1576 
1577 void TemplateTable::fop2(Operation op) {
1578   transition(ftos, ftos);
1579 
1580   if (UseSSE >= 1) {
1581     switch (op) {
1582     case add:
1583       __ addss(xmm0, at_rsp());
1584       __ addptr(rsp, Interpreter::stackElementSize);
1585       break;
1586     case sub:
1587       __ movflt(xmm1, xmm0);
1588       __ pop_f(xmm0);
1589       __ subss(xmm0, xmm1);
1590       break;
1591     case mul:
1592       __ mulss(xmm0, at_rsp());
1593       __ addptr(rsp, Interpreter::stackElementSize);
1594       break;
1595     case div:
1596       __ movflt(xmm1, xmm0);
1597       __ pop_f(xmm0);
1598       __ divss(xmm0, xmm1);
1599       break;
1600     case rem:
1601       // On x86_64 platforms the SharedRuntime::frem method is called to perform the
1602       // modulo operation. The frem method calls the function
1603       // double fmod(double x, double y) in math.h. The documentation of fmod states:
1604       // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
1605       // (signalling or quiet) is returned.
1606       //
1607       // On x86_32 platforms the FPU is used to perform the modulo operation. The
1608       // reason is that on 32-bit Windows the sign of modulo operations diverges from
1609       // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f (and not -0.0f).
1610       // The fprem instruction used on x86_32 is functionally equivalent to
1611       // SharedRuntime::frem in that it returns a NaN.
1612 #ifdef _LP64
1613       __ movflt(xmm1, xmm0);
1614       __ pop_f(xmm0);
1615       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
1616 #else
1617       __ push_f(xmm0);
1618       __ pop_f();
1619       __ fld_s(at_rsp());
1620       __ fremr(rax);
1621       __ f2ieee();
1622       __ pop(rax);  // pop second operand off the stack
1623       __ push_f();
1624       __ pop_f(xmm0);
1625 #endif
1626       break;
1627     default:
1628       ShouldNotReachHere();
1629       break;
1630     }
1631   } else {
1632 #ifdef _LP64
1633     ShouldNotReachHere();
1634 #else
1635     switch (op) {
1636     case add: __ fadd_s (at_rsp());                break;
1637     case sub: __ fsubr_s(at_rsp());                break;
1638     case mul: __ fmul_s (at_rsp());                break;
1639     case div: __ fdivr_s(at_rsp());                break;
1640     case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
1641     default : ShouldNotReachHere();
1642     }
1643     __ f2ieee();
1644     __ pop(rax);  // pop second operand off the stack
1645 #endif // _LP64
1646   }
1647 }
1648 
1649 void TemplateTable::dop2(Operation op) {
1650   transition(dtos, dtos);
1651   if (UseSSE >= 2) {
1652     switch (op) {
1653     case add:
1654       __ addsd(xmm0, at_rsp());
1655       __ addptr(rsp, 2 * Interpreter::stackElementSize);
1656       break;
1657     case sub:
1658       __ movdbl(xmm1, xmm0);
1659       __ pop_d(xmm0);
1660       __ subsd(xmm0, xmm1);
1661       break;
1662     case mul:
1663       __ mulsd(xmm0, at_rsp());
1664       __ addptr(rsp, 2 * Interpreter::stackElementSize);
1665       break;
1666     case div:
1667       __ movdbl(xmm1, xmm0);
1668       __ pop_d(xmm0);
1669       __ divsd(xmm0, xmm1);
1670       break;
1671     case rem:
1672       // Similar to fop2(), the modulo operation is performed using the
1673       // SharedRuntime::drem method (on x86_64 platforms) or using the
1674       // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
1675 #ifdef _LP64
1676       __ movdbl(xmm1, xmm0);
1677       __ pop_d(xmm0);
1678       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
1679 #else
1680       __ push_d(xmm0);
1681       __ pop_d();
1682       __ fld_d(at_rsp());
1683       __ fremr(rax);
1684       __ d2ieee();
1685       __ pop(rax);
1686       __ pop(rdx);
1687       __ push_d();
1688       __ pop_d(xmm0);
1689 #endif
1690       break;
1691     default:
1692       ShouldNotReachHere();
1693       break;
1694     }
1695   } else {
1696 #ifdef _LP64
1697     ShouldNotReachHere();
1698 #else
1699     switch (op) {
1700     case add: __ fadd_d (at_rsp());                break;
1701     case sub: __ fsubr_d(at_rsp());                break;
1702     case mul: {
1703       Label L_strict;
1704       Label L_join;
1705       const Address access_flags      (rcx, Method::access_flags_offset());
1706       __ get_method(rcx);
1707       __ movl(rcx, access_flags);
1708       __ testl(rcx, JVM_ACC_STRICT);
1709       __ jccb(Assembler::notZero, L_strict);
1710       __ fmul_d (at_rsp());
1711       __ jmpb(L_join);
1712       __ bind(L_strict);
1713       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1714       __ fmulp();
1715       __ fmul_d (at_rsp());
1716       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1717       __ fmulp();
1718       __ bind(L_join);
1719       break;
1720     }
1721     case div: {
1722       Label L_strict;
1723       Label L_join;
1724       const Address access_flags      (rcx, Method::access_flags_offset());
1725       __ get_method(rcx);
1726       __ movl(rcx, access_flags);
1727       __ testl(rcx, JVM_ACC_STRICT);
1728       __ jccb(Assembler::notZero, L_strict);
1729       __ fdivr_d(at_rsp());
1730       __ jmp(L_join);
1731       __ bind(L_strict);
1732       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1733       __ fmul_d (at_rsp());
1734       __ fdivrp();
1735       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1736       __ fmulp();
1737       __ bind(L_join);
1738       break;
1739     }
1740     case rem: __ fld_d  (at_rsp()); __ fremr(rax); break;
1741     default : ShouldNotReachHere();
1742     }
1743     __ d2ieee();
1744     // Pop double precision number from rsp.
1745     __ pop(rax);
1746     __ pop(rdx);
1747 #endif
1748   }
1749 }
1750 
1751 void TemplateTable::ineg() {
1752   transition(itos, itos);
1753   __ negl(rax);
1754 }
1755 
1756 void TemplateTable::lneg() {
1757   transition(ltos, ltos);
1758   LP64_ONLY(__ negq(rax));
1759   NOT_LP64(__ lneg(rdx, rax));
1760 }
1761 
1762 // Note: 'double' and 'long long' have 32-bits alignment on x86.
1763 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1764   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
1765   // of 128-bits operands for SSE instructions.
1766   jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1767   // Store the value to a 128-bits operand.
1768   operand[0] = lo;
1769   operand[1] = hi;
1770   return operand;
1771 }
1772 
1773 // Buffer for 128-bits masks used by SSE instructions.
1774 static jlong float_signflip_pool[2*2];
1775 static jlong double_signflip_pool[2*2];
1776 
1777 void TemplateTable::fneg() {
1778   transition(ftos, ftos);
1779   if (UseSSE >= 1) {
1780     static jlong *float_signflip  = double_quadword(&float_signflip_pool[1],  CONST64(0x8000000080000000),  CONST64(0x8000000080000000));
1781     __ xorps(xmm0, ExternalAddress((address) float_signflip));
1782   } else {
1783     LP64_ONLY(ShouldNotReachHere());
1784     NOT_LP64(__ fchs());
1785   }
1786 }
1787 
1788 void TemplateTable::dneg() {
1789   transition(dtos, dtos);
1790   if (UseSSE >= 2) {
1791     static jlong *double_signflip =
1792       double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
1793     __ xorpd(xmm0, ExternalAddress((address) double_signflip));
1794   } else {
1795 #ifdef _LP64
1796     ShouldNotReachHere();
1797 #else
1798     __ fchs();
1799 #endif
1800   }
1801 }
1802 
1803 void TemplateTable::iinc() {
1804   transition(vtos, vtos);
1805   __ load_signed_byte(rdx, at_bcp(2)); // get constant
1806   locals_index(rbx);
1807   __ addl(iaddress(rbx), rdx);
1808 }
1809 
1810 void TemplateTable::wide_iinc() {
1811   transition(vtos, vtos);
1812   __ movl(rdx, at_bcp(4)); // get constant
1813   locals_index_wide(rbx);
1814   __ bswapl(rdx); // swap bytes & sign-extend constant
1815   __ sarl(rdx, 16);
1816   __ addl(iaddress(rbx), rdx);
1817   // Note: should probably use only one movl to get both
1818   //       the index and the constant -> fix this
1819 }
1820 
1821 void TemplateTable::convert() {
1822 #ifdef _LP64
1823   // Checking
1824 #ifdef ASSERT
1825   {
1826     TosState tos_in  = ilgl;
1827     TosState tos_out = ilgl;
1828     switch (bytecode()) {
1829     case Bytecodes::_i2l: // fall through
1830     case Bytecodes::_i2f: // fall through
1831     case Bytecodes::_i2d: // fall through
1832     case Bytecodes::_i2b: // fall through
1833     case Bytecodes::_i2c: // fall through
1834     case Bytecodes::_i2s: tos_in = itos; break;
1835     case Bytecodes::_l2i: // fall through
1836     case Bytecodes::_l2f: // fall through
1837     case Bytecodes::_l2d: tos_in = ltos; break;
1838     case Bytecodes::_f2i: // fall through
1839     case Bytecodes::_f2l: // fall through
1840     case Bytecodes::_f2d: tos_in = ftos; break;
1841     case Bytecodes::_d2i: // fall through
1842     case Bytecodes::_d2l: // fall through
1843     case Bytecodes::_d2f: tos_in = dtos; break;
1844     default             : ShouldNotReachHere();
1845     }
1846     switch (bytecode()) {
1847     case Bytecodes::_l2i: // fall through
1848     case Bytecodes::_f2i: // fall through
1849     case Bytecodes::_d2i: // fall through
1850     case Bytecodes::_i2b: // fall through
1851     case Bytecodes::_i2c: // fall through
1852     case Bytecodes::_i2s: tos_out = itos; break;
1853     case Bytecodes::_i2l: // fall through
1854     case Bytecodes::_f2l: // fall through
1855     case Bytecodes::_d2l: tos_out = ltos; break;
1856     case Bytecodes::_i2f: // fall through
1857     case Bytecodes::_l2f: // fall through
1858     case Bytecodes::_d2f: tos_out = ftos; break;
1859     case Bytecodes::_i2d: // fall through
1860     case Bytecodes::_l2d: // fall through
1861     case Bytecodes::_f2d: tos_out = dtos; break;
1862     default             : ShouldNotReachHere();
1863     }
1864     transition(tos_in, tos_out);
1865   }
1866 #endif // ASSERT
1867 
1868   static const int64_t is_nan = 0x8000000000000000L;
1869 
1870   // Conversion
1871   switch (bytecode()) {
1872   case Bytecodes::_i2l:
1873     __ movslq(rax, rax);
1874     break;
1875   case Bytecodes::_i2f:
1876     __ cvtsi2ssl(xmm0, rax);
1877     break;
1878   case Bytecodes::_i2d:
1879     __ cvtsi2sdl(xmm0, rax);
1880     break;
1881   case Bytecodes::_i2b:
1882     __ movsbl(rax, rax);
1883     break;
1884   case Bytecodes::_i2c:
1885     __ movzwl(rax, rax);
1886     break;
1887   case Bytecodes::_i2s:
1888     __ movswl(rax, rax);
1889     break;
1890   case Bytecodes::_l2i:
1891     __ movl(rax, rax);
1892     break;
1893   case Bytecodes::_l2f:
1894     __ cvtsi2ssq(xmm0, rax);
1895     break;
1896   case Bytecodes::_l2d:
1897     __ cvtsi2sdq(xmm0, rax);
1898     break;
1899   case Bytecodes::_f2i:
1900   {
1901     Label L;
1902     __ cvttss2sil(rax, xmm0);
1903     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1904     __ jcc(Assembler::notEqual, L);
1905     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1906     __ bind(L);
1907   }
1908     break;
1909   case Bytecodes::_f2l:
1910   {
1911     Label L;
1912     __ cvttss2siq(rax, xmm0);
1913     // NaN or overflow/underflow?
1914     __ cmp64(rax, ExternalAddress((address) &is_nan));
1915     __ jcc(Assembler::notEqual, L);
1916     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1917     __ bind(L);
1918   }
1919     break;
1920   case Bytecodes::_f2d:
1921     __ cvtss2sd(xmm0, xmm0);
1922     break;
1923   case Bytecodes::_d2i:
1924   {
1925     Label L;
1926     __ cvttsd2sil(rax, xmm0);
1927     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1928     __ jcc(Assembler::notEqual, L);
1929     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1930     __ bind(L);
1931   }
1932     break;
1933   case Bytecodes::_d2l:
1934   {
1935     Label L;
1936     __ cvttsd2siq(rax, xmm0);
1937     // NaN or overflow/underflow?
1938     __ cmp64(rax, ExternalAddress((address) &is_nan));
1939     __ jcc(Assembler::notEqual, L);
1940     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1941     __ bind(L);
1942   }
1943     break;
1944   case Bytecodes::_d2f:
1945     __ cvtsd2ss(xmm0, xmm0);
1946     break;
1947   default:
1948     ShouldNotReachHere();
1949   }
1950 #else
1951   // Checking
1952 #ifdef ASSERT
1953   { TosState tos_in  = ilgl;
1954     TosState tos_out = ilgl;
1955     switch (bytecode()) {
1956       case Bytecodes::_i2l: // fall through
1957       case Bytecodes::_i2f: // fall through
1958       case Bytecodes::_i2d: // fall through
1959       case Bytecodes::_i2b: // fall through
1960       case Bytecodes::_i2c: // fall through
1961       case Bytecodes::_i2s: tos_in = itos; break;
1962       case Bytecodes::_l2i: // fall through
1963       case Bytecodes::_l2f: // fall through
1964       case Bytecodes::_l2d: tos_in = ltos; break;
1965       case Bytecodes::_f2i: // fall through
1966       case Bytecodes::_f2l: // fall through
1967       case Bytecodes::_f2d: tos_in = ftos; break;
1968       case Bytecodes::_d2i: // fall through
1969       case Bytecodes::_d2l: // fall through
1970       case Bytecodes::_d2f: tos_in = dtos; break;
1971       default             : ShouldNotReachHere();
1972     }
1973     switch (bytecode()) {
1974       case Bytecodes::_l2i: // fall through
1975       case Bytecodes::_f2i: // fall through
1976       case Bytecodes::_d2i: // fall through
1977       case Bytecodes::_i2b: // fall through
1978       case Bytecodes::_i2c: // fall through
1979       case Bytecodes::_i2s: tos_out = itos; break;
1980       case Bytecodes::_i2l: // fall through
1981       case Bytecodes::_f2l: // fall through
1982       case Bytecodes::_d2l: tos_out = ltos; break;
1983       case Bytecodes::_i2f: // fall through
1984       case Bytecodes::_l2f: // fall through
1985       case Bytecodes::_d2f: tos_out = ftos; break;
1986       case Bytecodes::_i2d: // fall through
1987       case Bytecodes::_l2d: // fall through
1988       case Bytecodes::_f2d: tos_out = dtos; break;
1989       default             : ShouldNotReachHere();
1990     }
1991     transition(tos_in, tos_out);
1992   }
1993 #endif // ASSERT
1994 
1995   // Conversion
1996   // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1997   switch (bytecode()) {
1998     case Bytecodes::_i2l:
1999       __ extend_sign(rdx, rax);
2000       break;
2001     case Bytecodes::_i2f:
2002       if (UseSSE >= 1) {
2003         __ cvtsi2ssl(xmm0, rax);
2004       } else {
2005         __ push(rax);          // store int on tos
2006         __ fild_s(at_rsp());   // load int to ST0
2007         __ f2ieee();           // truncate to float size
2008         __ pop(rcx);           // adjust rsp
2009       }
2010       break;
2011     case Bytecodes::_i2d:
2012       if (UseSSE >= 2) {
2013         __ cvtsi2sdl(xmm0, rax);
2014       } else {
2015       __ push(rax);          // add one slot for d2ieee()
2016       __ push(rax);          // store int on tos
2017       __ fild_s(at_rsp());   // load int to ST0
2018       __ d2ieee();           // truncate to double size
2019       __ pop(rcx);           // adjust rsp
2020       __ pop(rcx);
2021       }
2022       break;
2023     case Bytecodes::_i2b:
2024       __ shll(rax, 24);      // truncate upper 24 bits
2025       __ sarl(rax, 24);      // and sign-extend byte
2026       LP64_ONLY(__ movsbl(rax, rax));
2027       break;
2028     case Bytecodes::_i2c:
2029       __ andl(rax, 0xFFFF);  // truncate upper 16 bits
2030       LP64_ONLY(__ movzwl(rax, rax));
2031       break;
2032     case Bytecodes::_i2s:
2033       __ shll(rax, 16);      // truncate upper 16 bits
2034       __ sarl(rax, 16);      // and sign-extend short
2035       LP64_ONLY(__ movswl(rax, rax));
2036       break;
2037     case Bytecodes::_l2i:
2038       /* nothing to do */
2039       break;
2040     case Bytecodes::_l2f:
2041       // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
2042       // 64-bit long values to floats. On 32-bit platforms it is not possible
2043       // to use that instruction with 64-bit operands, therefore the FPU is
2044       // used to perform the conversion.
2045       __ push(rdx);          // store long on tos
2046       __ push(rax);
2047       __ fild_d(at_rsp());   // load long to ST0
2048       __ f2ieee();           // truncate to float size
2049       __ pop(rcx);           // adjust rsp
2050       __ pop(rcx);
2051       if (UseSSE >= 1) {
2052         __ push_f();
2053         __ pop_f(xmm0);
2054       }
2055       break;
2056     case Bytecodes::_l2d:
2057       // On 32-bit platforms the FPU is used for conversion because on
2058       // 32-bit platforms it is not not possible to use the cvtsi2sdq
2059       // instruction with 64-bit operands.
2060       __ push(rdx);          // store long on tos
2061       __ push(rax);
2062       __ fild_d(at_rsp());   // load long to ST0
2063       __ d2ieee();           // truncate to double size
2064       __ pop(rcx);           // adjust rsp
2065       __ pop(rcx);
2066       if (UseSSE >= 2) {
2067         __ push_d();
2068         __ pop_d(xmm0);
2069       }
2070       break;
2071     case Bytecodes::_f2i:
2072       // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
2073       // as it returns 0 for any NaN.
2074       if (UseSSE >= 1) {
2075         __ push_f(xmm0);
2076       } else {
2077         __ push(rcx);          // reserve space for argument
2078         __ fstp_s(at_rsp());   // pass float argument on stack
2079       }
2080       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
2081       break;
2082     case Bytecodes::_f2l:
2083       // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
2084       // as it returns 0 for any NaN.
2085       if (UseSSE >= 1) {
2086        __ push_f(xmm0);
2087       } else {
2088         __ push(rcx);          // reserve space for argument
2089         __ fstp_s(at_rsp());   // pass float argument on stack
2090       }
2091       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2092       break;
2093     case Bytecodes::_f2d:
2094       if (UseSSE < 1) {
2095         /* nothing to do */
2096       } else if (UseSSE == 1) {
2097         __ push_f(xmm0);
2098         __ pop_f();
2099       } else { // UseSSE >= 2
2100         __ cvtss2sd(xmm0, xmm0);
2101       }
2102       break;
2103     case Bytecodes::_d2i:
2104       if (UseSSE >= 2) {
2105         __ push_d(xmm0);
2106       } else {
2107         __ push(rcx);          // reserve space for argument
2108         __ push(rcx);
2109         __ fstp_d(at_rsp());   // pass double argument on stack
2110       }
2111       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
2112       break;
2113     case Bytecodes::_d2l:
2114       if (UseSSE >= 2) {
2115         __ push_d(xmm0);
2116       } else {
2117         __ push(rcx);          // reserve space for argument
2118         __ push(rcx);
2119         __ fstp_d(at_rsp());   // pass double argument on stack
2120       }
2121       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
2122       break;
2123     case Bytecodes::_d2f:
2124       if (UseSSE <= 1) {
2125         __ push(rcx);          // reserve space for f2ieee()
2126         __ f2ieee();           // truncate to float size
2127         __ pop(rcx);           // adjust rsp
2128         if (UseSSE == 1) {
2129           // The cvtsd2ss instruction is not available if UseSSE==1, therefore
2130           // the conversion is performed using the FPU in this case.
2131           __ push_f();
2132           __ pop_f(xmm0);
2133         }
2134       } else { // UseSSE >= 2
2135         __ cvtsd2ss(xmm0, xmm0);
2136       }
2137       break;
2138     default             :
2139       ShouldNotReachHere();
2140   }
2141 #endif
2142 }
2143 
2144 void TemplateTable::lcmp() {
2145   transition(ltos, itos);
2146 #ifdef _LP64
2147   Label done;
2148   __ pop_l(rdx);
2149   __ cmpq(rdx, rax);
2150   __ movl(rax, -1);
2151   __ jccb(Assembler::less, done);
2152   __ setb(Assembler::notEqual, rax);
2153   __ movzbl(rax, rax);
2154   __ bind(done);
2155 #else
2156 
2157   // y = rdx:rax
2158   __ pop_l(rbx, rcx);             // get x = rcx:rbx
2159   __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
2160   __ mov(rax, rcx);
2161 #endif
2162 }
2163 
2164 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2165   if ((is_float && UseSSE >= 1) ||
2166       (!is_float && UseSSE >= 2)) {
2167     Label done;
2168     if (is_float) {
2169       // XXX get rid of pop here, use ... reg, mem32
2170       __ pop_f(xmm1);
2171       __ ucomiss(xmm1, xmm0);
2172     } else {
2173       // XXX get rid of pop here, use ... reg, mem64
2174       __ pop_d(xmm1);
2175       __ ucomisd(xmm1, xmm0);
2176     }
2177     if (unordered_result < 0) {
2178       __ movl(rax, -1);
2179       __ jccb(Assembler::parity, done);
2180       __ jccb(Assembler::below, done);
2181       __ setb(Assembler::notEqual, rdx);
2182       __ movzbl(rax, rdx);
2183     } else {
2184       __ movl(rax, 1);
2185       __ jccb(Assembler::parity, done);
2186       __ jccb(Assembler::above, done);
2187       __ movl(rax, 0);
2188       __ jccb(Assembler::equal, done);
2189       __ decrementl(rax);
2190     }
2191     __ bind(done);
2192   } else {
2193 #ifdef _LP64
2194     ShouldNotReachHere();
2195 #else
2196     if (is_float) {
2197       __ fld_s(at_rsp());
2198     } else {
2199       __ fld_d(at_rsp());
2200       __ pop(rdx);
2201     }
2202     __ pop(rcx);
2203     __ fcmp2int(rax, unordered_result < 0);
2204 #endif // _LP64
2205   }
2206 }
2207 
2208 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2209   __ get_method(rcx); // rcx holds method
2210   __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2211                                      // holds bumped taken count
2212 
2213   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2214                              InvocationCounter::counter_offset();
2215   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2216                               InvocationCounter::counter_offset();
2217 
2218   // Load up edx with the branch displacement
2219   if (is_wide) {
2220     __ movl(rdx, at_bcp(1));
2221   } else {
2222     __ load_signed_short(rdx, at_bcp(1));
2223   }
2224   __ bswapl(rdx);
2225 
2226   if (!is_wide) {
2227     __ sarl(rdx, 16);
2228   }
2229   LP64_ONLY(__ movl2ptr(rdx, rdx));
2230 
2231   // Handle all the JSR stuff here, then exit.
2232   // It's much shorter and cleaner than intermingling with the non-JSR
2233   // normal-branch stuff occurring below.
2234   if (is_jsr) {
2235     // Pre-load the next target bytecode into rbx
2236     __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2237 
2238     // compute return address as bci in rax
2239     __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2240                         in_bytes(ConstMethod::codes_offset())));
2241     __ subptr(rax, Address(rcx, Method::const_offset()));
2242     // Adjust the bcp in r13 by the displacement in rdx
2243     __ addptr(rbcp, rdx);
2244     // jsr returns atos that is not an oop
2245     __ push_i(rax);
2246     __ dispatch_only(vtos, true);
2247     return;
2248   }
2249 
2250   // Normal (non-jsr) branch handling
2251 
2252   // Adjust the bcp in r13 by the displacement in rdx
2253   __ addptr(rbcp, rdx);
2254 
2255   assert(UseLoopCounter || !UseOnStackReplacement,
2256          "on-stack-replacement requires loop counters");
2257   Label backedge_counter_overflow;
2258   Label profile_method;
2259   Label dispatch;
2260   if (UseLoopCounter) {
2261     // increment backedge counter for backward branches
2262     // rax: MDO
2263     // rbx: MDO bumped taken-count
2264     // rcx: method
2265     // rdx: target offset
2266     // r13: target bcp
2267     // r14: locals pointer
2268     __ testl(rdx, rdx);             // check if forward or backward branch
2269     __ jcc(Assembler::positive, dispatch); // count only if backward branch
2270 
2271     // check if MethodCounters exists
2272     Label has_counters;
2273     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2274     __ testptr(rax, rax);
2275     __ jcc(Assembler::notZero, has_counters);
2276     __ push(rdx);
2277     __ push(rcx);
2278     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2279                rcx);
2280     __ pop(rcx);
2281     __ pop(rdx);
2282     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2283     __ testptr(rax, rax);
2284     __ jcc(Assembler::zero, dispatch);
2285     __ bind(has_counters);
2286 
2287     if (TieredCompilation) {
2288       Label no_mdo;
2289       int increment = InvocationCounter::count_increment;
2290       if (ProfileInterpreter) {
2291         // Are we profiling?
2292         __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2293         __ testptr(rbx, rbx);
2294         __ jccb(Assembler::zero, no_mdo);
2295         // Increment the MDO backedge counter
2296         const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2297                                            in_bytes(InvocationCounter::counter_offset()));
2298         const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2299         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
2300                                    UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2301         __ jmp(dispatch);
2302       }
2303       __ bind(no_mdo);
2304       // Increment backedge counter in MethodCounters*
2305       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2306       const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2307       __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
2308                                  rax, false, Assembler::zero,
2309                                  UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2310     } else { // not TieredCompilation
2311       // increment counter
2312       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2313       __ movl(rax, Address(rcx, be_offset));        // load backedge counter
2314       __ incrementl(rax, InvocationCounter::count_increment); // increment counter
2315       __ movl(Address(rcx, be_offset), rax);        // store counter
2316 
2317       __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
2318 
2319       __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
2320       __ addl(rax, Address(rcx, be_offset));        // add both counters
2321 
2322       if (ProfileInterpreter) {
2323         // Test to see if we should create a method data oop
2324         __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
2325         __ jcc(Assembler::less, dispatch);
2326 
2327         // if no method data exists, go to profile method
2328         __ test_method_data_pointer(rax, profile_method);
2329 
2330         if (UseOnStackReplacement) {
2331           // check for overflow against rbx which is the MDO taken count
2332           __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2333           __ jcc(Assembler::below, dispatch);
2334 
2335           // When ProfileInterpreter is on, the backedge_count comes
2336           // from the MethodData*, which value does not get reset on
2337           // the call to frequency_counter_overflow().  To avoid
2338           // excessive calls to the overflow routine while the method is
2339           // being compiled, add a second test to make sure the overflow
2340           // function is called only once every overflow_frequency.
2341           const int overflow_frequency = 1024;
2342           __ andl(rbx, overflow_frequency - 1);
2343           __ jcc(Assembler::zero, backedge_counter_overflow);
2344 
2345         }
2346       } else {
2347         if (UseOnStackReplacement) {
2348           // check for overflow against rax, which is the sum of the
2349           // counters
2350           __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2351           __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
2352 
2353         }
2354       }
2355     }
2356     __ bind(dispatch);
2357   }
2358 
2359   // Pre-load the next target bytecode into rbx
2360   __ load_unsigned_byte(rbx, Address(rbcp, 0));
2361 
2362   // continue with the bytecode @ target
2363   // rax: return bci for jsr's, unused otherwise
2364   // rbx: target bytecode
2365   // r13: target bcp
2366   __ dispatch_only(vtos, true);
2367 
2368   if (UseLoopCounter) {
2369     if (ProfileInterpreter) {
2370       // Out-of-line code to allocate method data oop.
2371       __ bind(profile_method);
2372       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2373       __ set_method_data_pointer_for_bcp();
2374       __ jmp(dispatch);
2375     }
2376 
2377     if (UseOnStackReplacement) {
2378       // invocation counter overflow
2379       __ bind(backedge_counter_overflow);
2380       __ negptr(rdx);
2381       __ addptr(rdx, rbcp); // branch bcp
2382       // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2383       __ call_VM(noreg,
2384                  CAST_FROM_FN_PTR(address,
2385                                   InterpreterRuntime::frequency_counter_overflow),
2386                  rdx);
2387 
2388       // rax: osr nmethod (osr ok) or NULL (osr not possible)
2389       // rdx: scratch
2390       // r14: locals pointer
2391       // r13: bcp
2392       __ testptr(rax, rax);                        // test result
2393       __ jcc(Assembler::zero, dispatch);         // no osr if null
2394       // nmethod may have been invalidated (VM may block upon call_VM return)
2395       __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2396       __ jcc(Assembler::notEqual, dispatch);
2397 
2398       // We have the address of an on stack replacement routine in rax.
2399       // In preparation of invoking it, first we must migrate the locals
2400       // and monitors from off the interpreter frame on the stack.
2401       // Ensure to save the osr nmethod over the migration call,
2402       // it will be preserved in rbx.
2403       __ mov(rbx, rax);
2404 
2405       NOT_LP64(__ get_thread(rcx));
2406 
2407       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2408 
2409       // rax is OSR buffer, move it to expected parameter location
2410       LP64_ONLY(__ mov(j_rarg0, rax));
2411       NOT_LP64(__ mov(rcx, rax));
2412       // We use j_rarg definitions here so that registers don't conflict as parameter
2413       // registers change across platforms as we are in the midst of a calling
2414       // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.
2415 
2416       const Register retaddr   = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2417       const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2418 
2419       // pop the interpreter frame
2420       __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2421       __ leave();                                // remove frame anchor
2422       __ pop(retaddr);                           // get return address
2423       __ mov(rsp, sender_sp);                   // set sp to sender sp
2424       // Ensure compiled code always sees stack at proper alignment
2425       __ andptr(rsp, -(StackAlignmentInBytes));
2426 
2427       // unlike x86 we need no specialized return from compiled code
2428       // to the interpreter or the call stub.
2429 
2430       // push the return address
2431       __ push(retaddr);
2432 
2433       // and begin the OSR nmethod
2434       __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
2435     }
2436   }
2437 }
2438 
2439 void TemplateTable::if_0cmp(Condition cc) {
2440   transition(itos, vtos);
2441   // assume branch is more often taken than not (loops use backward branches)
2442   Label not_taken;
2443   __ testl(rax, rax);
2444   __ jcc(j_not(cc), not_taken);
2445   branch(false, false);
2446   __ bind(not_taken);
2447   __ profile_not_taken_branch(rax);
2448 }
2449 
2450 void TemplateTable::if_icmp(Condition cc) {
2451   transition(itos, vtos);
2452   // assume branch is more often taken than not (loops use backward branches)
2453   Label not_taken;
2454   __ pop_i(rdx);
2455   __ cmpl(rdx, rax);
2456   __ jcc(j_not(cc), not_taken);
2457   branch(false, false);
2458   __ bind(not_taken);
2459   __ profile_not_taken_branch(rax);
2460 }
2461 
2462 void TemplateTable::if_nullcmp(Condition cc) {
2463   transition(atos, vtos);
2464   // assume branch is more often taken than not (loops use backward branches)
2465   Label not_taken;
2466   __ testptr(rax, rax);
2467   __ jcc(j_not(cc), not_taken);
2468   branch(false, false);
2469   __ bind(not_taken);
2470   __ profile_not_taken_branch(rax);
2471 }
2472 
2473 void TemplateTable::if_acmp(Condition cc) {
2474   transition(atos, vtos);
2475   // assume branch is more often taken than not (loops use backward branches)
2476   Label taken, not_taken;
2477   __ pop_ptr(rdx);
2478 
2479   const int is_value_mask = markOopDesc::always_locked_pattern;
2480   if (EnableValhalla && ACmpOnValues == 1) {
2481     Label is_null;
2482     __ testptr(rdx, rdx);
2483     __ jcc(Assembler::zero, is_null);
2484     __ movptr(rbx, Address(rdx, oopDesc::mark_offset_in_bytes()));
2485     __ andptr(rbx, is_value_mask);
2486     __ cmpl(rbx, is_value_mask);
2487     __ setb(Assembler::equal, rbx);
2488     __ movzbl(rbx, rbx);
2489     __ orptr(rdx, rbx);
2490     __ bind(is_null);
2491   }
2492 
2493   __ cmpoop(rdx, rax);
2494 
2495   if (EnableValhalla && ACmpOnValues != 1) {
2496     __ jcc(Assembler::notEqual, (cc == not_equal) ? taken : not_taken);
2497     __ testptr(rdx, rdx);
2498     __ jcc(Assembler::zero, (cc == equal) ? taken : not_taken);
2499     __ movptr(rbx, Address(rdx, oopDesc::mark_offset_in_bytes()));
2500     __ andptr(rbx, is_value_mask);
2501     __ cmpl(rbx, is_value_mask);
2502     cc = (cc == equal) ? not_equal : equal;
2503   }
2504 
2505   __ jcc(j_not(cc), not_taken);
2506   __ bind(taken);
2507   branch(false, false);
2508   __ bind(not_taken);
2509   __ profile_not_taken_branch(rax);
2510 }
2511 
2512 void TemplateTable::ret() {
2513   transition(vtos, vtos);
2514   locals_index(rbx);
2515   LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2516   NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2517   __ profile_ret(rbx, rcx);
2518   __ get_method(rax);
2519   __ movptr(rbcp, Address(rax, Method::const_offset()));
2520   __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2521                       ConstMethod::codes_offset()));
2522   __ dispatch_next(vtos, 0, true);
2523 }
2524 
2525 void TemplateTable::wide_ret() {
2526   transition(vtos, vtos);
2527   locals_index_wide(rbx);
2528   __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2529   __ profile_ret(rbx, rcx);
2530   __ get_method(rax);
2531   __ movptr(rbcp, Address(rax, Method::const_offset()));
2532   __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2533   __ dispatch_next(vtos, 0, true);
2534 }
2535 
2536 void TemplateTable::tableswitch() {
2537   Label default_case, continue_execution;
2538   transition(itos, vtos);
2539 
2540   // align r13/rsi
2541   __ lea(rbx, at_bcp(BytesPerInt));
2542   __ andptr(rbx, -BytesPerInt);
2543   // load lo & hi
2544   __ movl(rcx, Address(rbx, BytesPerInt));
2545   __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2546   __ bswapl(rcx);
2547   __ bswapl(rdx);
2548   // check against lo & hi
2549   __ cmpl(rax, rcx);
2550   __ jcc(Assembler::less, default_case);
2551   __ cmpl(rax, rdx);
2552   __ jcc(Assembler::greater, default_case);
2553   // lookup dispatch offset
2554   __ subl(rax, rcx);
2555   __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2556   __ profile_switch_case(rax, rbx, rcx);
2557   // continue execution
2558   __ bind(continue_execution);
2559   __ bswapl(rdx);
2560   LP64_ONLY(__ movl2ptr(rdx, rdx));
2561   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2562   __ addptr(rbcp, rdx);
2563   __ dispatch_only(vtos, true);
2564   // handle default
2565   __ bind(default_case);
2566   __ profile_switch_default(rax);
2567   __ movl(rdx, Address(rbx, 0));
2568   __ jmp(continue_execution);
2569 }
2570 
2571 void TemplateTable::lookupswitch() {
2572   transition(itos, itos);
2573   __ stop("lookupswitch bytecode should have been rewritten");
2574 }
2575 
2576 void TemplateTable::fast_linearswitch() {
2577   transition(itos, vtos);
2578   Label loop_entry, loop, found, continue_execution;
2579   // bswap rax so we can avoid bswapping the table entries
2580   __ bswapl(rax);
2581   // align r13
2582   __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2583                                     // this instruction (change offsets
2584                                     // below)
2585   __ andptr(rbx, -BytesPerInt);
2586   // set counter
2587   __ movl(rcx, Address(rbx, BytesPerInt));
2588   __ bswapl(rcx);
2589   __ jmpb(loop_entry);
2590   // table search
2591   __ bind(loop);
2592   __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2593   __ jcc(Assembler::equal, found);
2594   __ bind(loop_entry);
2595   __ decrementl(rcx);
2596   __ jcc(Assembler::greaterEqual, loop);
2597   // default case
2598   __ profile_switch_default(rax);
2599   __ movl(rdx, Address(rbx, 0));
2600   __ jmp(continue_execution);
2601   // entry found -> get offset
2602   __ bind(found);
2603   __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2604   __ profile_switch_case(rcx, rax, rbx);
2605   // continue execution
2606   __ bind(continue_execution);
2607   __ bswapl(rdx);
2608   __ movl2ptr(rdx, rdx);
2609   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2610   __ addptr(rbcp, rdx);
2611   __ dispatch_only(vtos, true);
2612 }
2613 
2614 void TemplateTable::fast_binaryswitch() {
2615   transition(itos, vtos);
2616   // Implementation using the following core algorithm:
2617   //
2618   // int binary_search(int key, LookupswitchPair* array, int n) {
2619   //   // Binary search according to "Methodik des Programmierens" by
2620   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2621   //   int i = 0;
2622   //   int j = n;
2623   //   while (i+1 < j) {
2624   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2625   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2626   //     // where a stands for the array and assuming that the (inexisting)
2627   //     // element a[n] is infinitely big.
2628   //     int h = (i + j) >> 1;
2629   //     // i < h < j
2630   //     if (key < array[h].fast_match()) {
2631   //       j = h;
2632   //     } else {
2633   //       i = h;
2634   //     }
2635   //   }
2636   //   // R: a[i] <= key < a[i+1] or Q
2637   //   // (i.e., if key is within array, i is the correct index)
2638   //   return i;
2639   // }
2640 
2641   // Register allocation
2642   const Register key   = rax; // already set (tosca)
2643   const Register array = rbx;
2644   const Register i     = rcx;
2645   const Register j     = rdx;
2646   const Register h     = rdi;
2647   const Register temp  = rsi;
2648 
2649   // Find array start
2650   NOT_LP64(__ save_bcp());
2651 
2652   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2653                                           // get rid of this
2654                                           // instruction (change
2655                                           // offsets below)
2656   __ andptr(array, -BytesPerInt);
2657 
2658   // Initialize i & j
2659   __ xorl(i, i);                            // i = 0;
2660   __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2661 
2662   // Convert j into native byteordering
2663   __ bswapl(j);
2664 
2665   // And start
2666   Label entry;
2667   __ jmp(entry);
2668 
2669   // binary search loop
2670   {
2671     Label loop;
2672     __ bind(loop);
2673     // int h = (i + j) >> 1;
2674     __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2675     __ sarl(h, 1);                               // h = (i + j) >> 1;
2676     // if (key < array[h].fast_match()) {
2677     //   j = h;
2678     // } else {
2679     //   i = h;
2680     // }
2681     // Convert array[h].match to native byte-ordering before compare
2682     __ movl(temp, Address(array, h, Address::times_8));
2683     __ bswapl(temp);
2684     __ cmpl(key, temp);
2685     // j = h if (key <  array[h].fast_match())
2686     __ cmov32(Assembler::less, j, h);
2687     // i = h if (key >= array[h].fast_match())
2688     __ cmov32(Assembler::greaterEqual, i, h);
2689     // while (i+1 < j)
2690     __ bind(entry);
2691     __ leal(h, Address(i, 1)); // i+1
2692     __ cmpl(h, j);             // i+1 < j
2693     __ jcc(Assembler::less, loop);
2694   }
2695 
2696   // end of binary search, result index is i (must check again!)
2697   Label default_case;
2698   // Convert array[i].match to native byte-ordering before compare
2699   __ movl(temp, Address(array, i, Address::times_8));
2700   __ bswapl(temp);
2701   __ cmpl(key, temp);
2702   __ jcc(Assembler::notEqual, default_case);
2703 
2704   // entry found -> j = offset
2705   __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2706   __ profile_switch_case(i, key, array);
2707   __ bswapl(j);
2708   LP64_ONLY(__ movslq(j, j));
2709 
2710   NOT_LP64(__ restore_bcp());
2711   NOT_LP64(__ restore_locals());                           // restore rdi
2712 
2713   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2714   __ addptr(rbcp, j);
2715   __ dispatch_only(vtos, true);
2716 
2717   // default case -> j = default offset
2718   __ bind(default_case);
2719   __ profile_switch_default(i);
2720   __ movl(j, Address(array, -2 * BytesPerInt));
2721   __ bswapl(j);
2722   LP64_ONLY(__ movslq(j, j));
2723 
2724   NOT_LP64(__ restore_bcp());
2725   NOT_LP64(__ restore_locals());
2726 
2727   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2728   __ addptr(rbcp, j);
2729   __ dispatch_only(vtos, true);
2730 }
2731 
2732 void TemplateTable::_return(TosState state) {
2733   transition(state, state);
2734 
2735   assert(_desc->calls_vm(),
2736          "inconsistent calls_vm information"); // call in remove_activation
2737 
2738   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2739     assert(state == vtos, "only valid state");
2740     Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2741     __ movptr(robj, aaddress(0));
2742     __ load_klass(rdi, robj);
2743     __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2744     __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2745     Label skip_register_finalizer;
2746     __ jcc(Assembler::zero, skip_register_finalizer);
2747 
2748     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2749 
2750     __ bind(skip_register_finalizer);
2751   }
2752 
2753   if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
2754     Label no_safepoint;
2755     NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
2756 #ifdef _LP64
2757     __ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
2758 #else
2759     const Register thread = rdi;
2760     __ get_thread(thread);
2761     __ testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
2762 #endif
2763     __ jcc(Assembler::zero, no_safepoint);
2764     __ push(state);
2765     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2766                                     InterpreterRuntime::at_safepoint));
2767     __ pop(state);
2768     __ bind(no_safepoint);
2769   }
2770 
2771   // Narrow result if state is itos but result type is smaller.
2772   // Need to narrow in the return bytecode rather than in generate_return_entry
2773   // since compiled code callers expect the result to already be narrowed.
2774   if (state == itos) {
2775     __ narrow(rax);
2776   }
2777 
2778   __ remove_activation(state, rbcp, true, true, true);
2779 
2780   __ jmp(rbcp);
2781 }
2782 
2783 // ----------------------------------------------------------------------------
2784 // Volatile variables demand their effects be made known to all CPU's
2785 // in order.  Store buffers on most chips allow reads & writes to
2786 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2787 // without some kind of memory barrier (i.e., it's not sufficient that
2788 // the interpreter does not reorder volatile references, the hardware
2789 // also must not reorder them).
2790 //
2791 // According to the new Java Memory Model (JMM):
2792 // (1) All volatiles are serialized wrt to each other.  ALSO reads &
2793 //     writes act as aquire & release, so:
2794 // (2) A read cannot let unrelated NON-volatile memory refs that
2795 //     happen after the read float up to before the read.  It's OK for
2796 //     non-volatile memory refs that happen before the volatile read to
2797 //     float down below it.
2798 // (3) Similar a volatile write cannot let unrelated NON-volatile
2799 //     memory refs that happen BEFORE the write float down to after the
2800 //     write.  It's OK for non-volatile memory refs that happen after the
2801 //     volatile write to float up before it.
2802 //
2803 // We only put in barriers around volatile refs (they are expensive),
2804 // not _between_ memory refs (that would require us to track the
2805 // flavor of the previous memory refs).  Requirements (2) and (3)
2806 // require some barriers before volatile stores and after volatile
2807 // loads.  These nearly cover requirement (1) but miss the
2808 // volatile-store-volatile-load case.  This final case is placed after
2809 // volatile-stores although it could just as well go before
2810 // volatile-loads.
2811 
2812 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2813   // Helper function to insert a is-volatile test and memory barrier
2814   __ membar(order_constraint);
2815 }
2816 
2817 void TemplateTable::resolve_cache_and_index(int byte_no,
2818                                             Register Rcache,
2819                                             Register index,
2820                                             size_t index_size) {
2821   const Register temp = rbx;
2822   assert_different_registers(Rcache, index, temp);
2823 
2824   Label resolved;
2825 
2826   Bytecodes::Code code = bytecode();
2827   switch (code) {
2828   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2829   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2830   default: break;
2831   }
2832 
2833   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2834   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2835   __ cmpl(temp, code);  // have we resolved this bytecode?
2836   __ jcc(Assembler::equal, resolved);
2837 
2838   // resolve first time through
2839   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2840   __ movl(temp, code);
2841   __ call_VM(noreg, entry, temp);
2842   // Update registers with resolved info
2843   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2844   __ bind(resolved);
2845 }
2846 
2847 // The cache and index registers must be set before call
2848 void TemplateTable::load_field_cp_cache_entry(Register obj,
2849                                               Register cache,
2850                                               Register index,
2851                                               Register off,
2852                                               Register flags,
2853                                               bool is_static = false) {
2854   assert_different_registers(cache, index, flags, off);
2855 
2856   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2857   // Field offset
2858   __ movptr(off, Address(cache, index, Address::times_ptr,
2859                          in_bytes(cp_base_offset +
2860                                   ConstantPoolCacheEntry::f2_offset())));
2861   // Flags
2862   __ movl(flags, Address(cache, index, Address::times_ptr,
2863                          in_bytes(cp_base_offset +
2864                                   ConstantPoolCacheEntry::flags_offset())));
2865 
2866   // klass overwrite register
2867   if (is_static) {
2868     __ movptr(obj, Address(cache, index, Address::times_ptr,
2869                            in_bytes(cp_base_offset +
2870                                     ConstantPoolCacheEntry::f1_offset())));
2871     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2872     __ movptr(obj, Address(obj, mirror_offset));
2873     __ resolve_oop_handle(obj);
2874   }
2875 }
2876 
2877 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2878                                                Register method,
2879                                                Register itable_index,
2880                                                Register flags,
2881                                                bool is_invokevirtual,
2882                                                bool is_invokevfinal, /*unused*/
2883                                                bool is_invokedynamic) {
2884   // setup registers
2885   const Register cache = rcx;
2886   const Register index = rdx;
2887   assert_different_registers(method, flags);
2888   assert_different_registers(method, cache, index);
2889   assert_different_registers(itable_index, flags);
2890   assert_different_registers(itable_index, cache, index);
2891   // determine constant pool cache field offsets
2892   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2893   const int method_offset = in_bytes(
2894     ConstantPoolCache::base_offset() +
2895       ((byte_no == f2_byte)
2896        ? ConstantPoolCacheEntry::f2_offset()
2897        : ConstantPoolCacheEntry::f1_offset()));
2898   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2899                                     ConstantPoolCacheEntry::flags_offset());
2900   // access constant pool cache fields
2901   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2902                                     ConstantPoolCacheEntry::f2_offset());
2903 
2904   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2905   resolve_cache_and_index(byte_no, cache, index, index_size);
2906     __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2907 
2908   if (itable_index != noreg) {
2909     // pick up itable or appendix index from f2 also:
2910     __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2911   }
2912   __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2913 }
2914 
2915 // The registers cache and index expected to be set before call.
2916 // Correct values of the cache and index registers are preserved.
2917 void TemplateTable::jvmti_post_field_access(Register cache,
2918                                             Register index,
2919                                             bool is_static,
2920                                             bool has_tos) {
2921   if (JvmtiExport::can_post_field_access()) {
2922     // Check to see if a field access watch has been set before we take
2923     // the time to call into the VM.
2924     Label L1;
2925     assert_different_registers(cache, index, rax);
2926     __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2927     __ testl(rax,rax);
2928     __ jcc(Assembler::zero, L1);
2929 
2930     // cache entry pointer
2931     __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2932     __ shll(index, LogBytesPerWord);
2933     __ addptr(cache, index);
2934     if (is_static) {
2935       __ xorptr(rax, rax);      // NULL object reference
2936     } else {
2937       __ pop(atos);         // Get the object
2938       __ verify_oop(rax);
2939       __ push(atos);        // Restore stack state
2940     }
2941     // rax,:   object pointer or NULL
2942     // cache: cache entry pointer
2943     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2944                rax, cache);
2945     __ get_cache_and_index_at_bcp(cache, index, 1);
2946     __ bind(L1);
2947   }
2948 }
2949 
2950 void TemplateTable::pop_and_check_object(Register r) {
2951   __ pop_ptr(r);
2952   __ null_check(r);  // for field access must check obj.
2953   __ verify_oop(r);
2954 }
2955 
2956 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2957   transition(vtos, vtos);
2958 
2959   const Register cache = rcx;
2960   const Register index = rdx;
2961   const Register obj   = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2962   const Register off   = rbx;
2963   const Register flags = rax;
2964   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
2965   const Register flags2 = rdx;
2966 
2967   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2968   jvmti_post_field_access(cache, index, is_static, false);
2969   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2970 
2971   const Address field(obj, off, Address::times_1, 0*wordSize);
2972 
2973   Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notValueType;
2974 
2975   if (!is_static) {
2976     __ movptr(rcx, Address(cache, index, Address::times_ptr,
2977                            in_bytes(ConstantPoolCache::base_offset() +
2978                                     ConstantPoolCacheEntry::f1_offset())));
2979   }
2980 
2981   __ movl(flags2, flags);
2982 
2983   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2984   // Make sure we don't need to mask edx after the above shift
2985   assert(btos == 0, "change code, btos != 0");
2986 
2987   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2988 
2989   __ jcc(Assembler::notZero, notByte);
2990   // btos
2991   if (!is_static) pop_and_check_object(obj);
2992   __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
2993   __ push(btos);
2994   // Rewrite bytecode to be faster
2995   if (!is_static && rc == may_rewrite) {
2996     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
2997   }
2998   __ jmp(Done);
2999 
3000   __ bind(notByte);
3001 
3002   __ cmpl(flags, ztos);
3003   __ jcc(Assembler::notEqual, notBool);
3004    if (!is_static) pop_and_check_object(obj);
3005   // ztos (same code as btos)
3006   __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
3007   __ push(ztos);
3008   // Rewrite bytecode to be faster
3009   if (!is_static && rc == may_rewrite) {
3010     // use btos rewriting, no truncating to t/f bit is needed for getfield.
3011     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
3012   }
3013   __ jmp(Done);
3014 
3015   __ bind(notBool);
3016   __ cmpl(flags, atos);
3017   __ jcc(Assembler::notEqual, notObj);
3018   // atos
3019   if (!EnableValhalla) {
3020     if (!is_static) pop_and_check_object(obj);
3021     do_oop_load(_masm, field, rax);
3022     __ push(atos);
3023     if (!is_static && rc == may_rewrite) {
3024       patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
3025     }
3026     __ jmp(Done);
3027   } else {
3028     if (is_static) {
3029       __ load_heap_oop(rax, field);
3030       Label isFlattenable, uninitialized;
3031       // Issue below if the static field has not been initialized yet
3032       __ test_field_is_flattenable(flags2, rscratch1, isFlattenable);
3033         // Not flattenable case
3034         __ push(atos);
3035         __ jmp(Done);
3036       // Flattenable case, must not return null even if uninitialized
3037       __ bind(isFlattenable);
3038         __ testptr(rax, rax);
3039         __ jcc(Assembler::zero, uninitialized);
3040           __ push(atos);
3041           __ jmp(Done);
3042         __ bind(uninitialized);
3043           __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
3044           __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_value_field),
3045                  obj, flags2);
3046           __ verify_oop(rax);
3047           __ push(atos);
3048           __ jmp(Done);
3049     } else {
3050       Label isFlattened, nonnull, isFlattenable, rewriteFlattenable;
3051       __ test_field_is_flattenable(flags2, rscratch1, isFlattenable);
3052         // Non-flattenable field case, also covers the object case
3053         pop_and_check_object(obj);
3054         __ load_heap_oop(rax, field);
3055         __ push(atos);
3056         if (rc == may_rewrite) {
3057           patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
3058         }
3059         __ jmp(Done);
3060       __ bind(isFlattenable);
3061         __ test_field_is_flattened(flags2, rscratch1, isFlattened);
3062           // Non-flattened field case
3063           pop_and_check_object(obj);
3064           __ load_heap_oop(rax, field);
3065           __ testptr(rax, rax);
3066           __ jcc(Assembler::notZero, nonnull);
3067             __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
3068             __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field),
3069                        obj, flags2);
3070           __ bind(nonnull);
3071           __ verify_oop(rax);
3072           __ push(atos);
3073           __ jmp(rewriteFlattenable);
3074         __ bind(isFlattened);
3075           __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
3076           pop_and_check_object(rbx);
3077           call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field),
3078                   rbx, flags2, rcx);
3079           __ verify_oop(rax);
3080           __ push(atos);
3081       __ bind(rewriteFlattenable);
3082       if (rc == may_rewrite) {
3083         patch_bytecode(Bytecodes::_fast_qgetfield, bc, rbx);
3084       }
3085       __ jmp(Done);
3086     }
3087   }
3088 
3089   __ bind(notObj);
3090 
3091   if (!is_static) pop_and_check_object(obj);
3092 
3093   __ cmpl(flags, itos);
3094   __ jcc(Assembler::notEqual, notInt);
3095   // itos
3096   __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3097   __ push(itos);
3098   // Rewrite bytecode to be faster
3099   if (!is_static && rc == may_rewrite) {
3100     patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
3101   }
3102   __ jmp(Done);
3103 
3104   __ bind(notInt);
3105   __ cmpl(flags, ctos);
3106   __ jcc(Assembler::notEqual, notChar);
3107   // ctos
3108   __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3109   __ push(ctos);
3110   // Rewrite bytecode to be faster
3111   if (!is_static && rc == may_rewrite) {
3112     patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
3113   }
3114   __ jmp(Done);
3115 
3116   __ bind(notChar);
3117   __ cmpl(flags, stos);
3118   __ jcc(Assembler::notEqual, notShort);
3119   // stos
3120   __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3121   __ push(stos);
3122   // Rewrite bytecode to be faster
3123   if (!is_static && rc == may_rewrite) {
3124     patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
3125   }
3126   __ jmp(Done);
3127 
3128   __ bind(notShort);
3129   __ cmpl(flags, ltos);
3130   __ jcc(Assembler::notEqual, notLong);
3131   // ltos
3132     // Generate code as if volatile (x86_32).  There just aren't enough registers to
3133     // save that information and this code is faster than the test.
3134   __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
3135   __ push(ltos);
3136   // Rewrite bytecode to be faster
3137   LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
3138   __ jmp(Done);
3139 
3140   __ bind(notLong);
3141   __ cmpl(flags, ftos);
3142   __ jcc(Assembler::notEqual, notFloat);
3143   // ftos
3144 
3145   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3146   __ push(ftos);
3147   // Rewrite bytecode to be faster
3148   if (!is_static && rc == may_rewrite) {
3149     patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
3150   }
3151   __ jmp(Done);
3152 
3153   __ bind(notFloat);
3154 #ifdef ASSERT
3155   Label notDouble;
3156   __ cmpl(flags, dtos);
3157   __ jcc(Assembler::notEqual, notDouble);
3158 #endif
3159   // dtos
3160   __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3161   __ push(dtos);
3162   // Rewrite bytecode to be faster
3163   if (!is_static && rc == may_rewrite) {
3164     patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
3165   }
3166 #ifdef ASSERT
3167   __ jmp(Done);
3168 
3169   __ bind(notDouble);
3170   __ stop("Bad state");
3171 #endif
3172 
3173   __ bind(Done);
3174   // [jk] not needed currently
3175   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
3176   //                                              Assembler::LoadStore));
3177 }
3178 
3179 void TemplateTable::getfield(int byte_no) {
3180   getfield_or_static(byte_no, false);
3181 }
3182 
3183 void TemplateTable::nofast_getfield(int byte_no) {
3184   getfield_or_static(byte_no, false, may_not_rewrite);
3185 }
3186 
3187 void TemplateTable::getstatic(int byte_no) {
3188   getfield_or_static(byte_no, true);
3189 }
3190 
3191 void TemplateTable::withfield() {
3192   transition(vtos, atos);
3193 
3194   Register cache = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
3195   Register index = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
3196 
3197   resolve_cache_and_index(f2_byte, cache, index, sizeof(u2));
3198 
3199   call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::withfield), cache);
3200   // new value type is returned in rbx
3201   // stack adjustement is returned in rax
3202   __ verify_oop(rbx);
3203   __ addptr(rsp, rax);
3204   __ movptr(rax, rbx);
3205 }
3206 
3207 // The registers cache and index expected to be set before call.
3208 // The function may destroy various registers, just not the cache and index registers.
3209 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
3210 
3211   const Register robj = LP64_ONLY(c_rarg2)   NOT_LP64(rax);
3212   const Register RBX  = LP64_ONLY(c_rarg1)   NOT_LP64(rbx);
3213   const Register RCX  = LP64_ONLY(c_rarg3)   NOT_LP64(rcx);
3214   const Register RDX  = LP64_ONLY(rscratch1) NOT_LP64(rdx);
3215 
3216   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3217 
3218   if (JvmtiExport::can_post_field_modification()) {
3219     // Check to see if a field modification watch has been set before
3220     // we take the time to call into the VM.
3221     Label L1;
3222     assert_different_registers(cache, index, rax);
3223     __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3224     __ testl(rax, rax);
3225     __ jcc(Assembler::zero, L1);
3226 
3227     __ get_cache_and_index_at_bcp(robj, RDX, 1);
3228 
3229 
3230     if (is_static) {
3231       // Life is simple.  Null out the object pointer.
3232       __ xorl(RBX, RBX);
3233 
3234     } else {
3235       // Life is harder. The stack holds the value on top, followed by
3236       // the object.  We don't know the size of the value, though; it
3237       // could be one or two words depending on its type. As a result,
3238       // we must find the type to determine where the object is.
3239 #ifndef _LP64
3240       Label two_word, valsize_known;
3241 #endif
3242       __ movl(RCX, Address(robj, RDX,
3243                            Address::times_ptr,
3244                            in_bytes(cp_base_offset +
3245                                      ConstantPoolCacheEntry::flags_offset())));
3246       NOT_LP64(__ mov(rbx, rsp));
3247       __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);
3248 
3249       // Make sure we don't need to mask rcx after the above shift
3250       ConstantPoolCacheEntry::verify_tos_state_shift();
3251 #ifdef _LP64
3252       __ movptr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
3253       __ cmpl(c_rarg3, ltos);
3254       __ cmovptr(Assembler::equal,
3255                  c_rarg1, at_tos_p2()); // ltos (two word jvalue)
3256       __ cmpl(c_rarg3, dtos);
3257       __ cmovptr(Assembler::equal,
3258                  c_rarg1, at_tos_p2()); // dtos (two word jvalue)
3259 #else
3260       __ cmpl(rcx, ltos);
3261       __ jccb(Assembler::equal, two_word);
3262       __ cmpl(rcx, dtos);
3263       __ jccb(Assembler::equal, two_word);
3264       __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
3265       __ jmpb(valsize_known);
3266 
3267       __ bind(two_word);
3268       __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
3269 
3270       __ bind(valsize_known);
3271       // setup object pointer
3272       __ movptr(rbx, Address(rbx, 0));
3273 #endif
3274     }
3275     // cache entry pointer
3276     __ addptr(robj, in_bytes(cp_base_offset));
3277     __ shll(RDX, LogBytesPerWord);
3278     __ addptr(robj, RDX);
3279     // object (tos)
3280     __ mov(RCX, rsp);
3281     // c_rarg1: object pointer set up above (NULL if static)
3282     // c_rarg2: cache entry pointer
3283     // c_rarg3: jvalue object on the stack
3284     __ call_VM(noreg,
3285                CAST_FROM_FN_PTR(address,
3286                                 InterpreterRuntime::post_field_modification),
3287                RBX, robj, RCX);
3288     __ get_cache_and_index_at_bcp(cache, index, 1);
3289     __ bind(L1);
3290   }
3291 }
3292 
3293 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3294   transition(vtos, vtos);
3295 
3296   const Register cache = rcx;
3297   const Register index = rdx;
3298   const Register obj   = rcx;
3299   const Register off   = rbx;
3300   const Register flags = rax;
3301   const Register flags2 = rdx;
3302 
3303   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
3304   jvmti_post_field_mod(cache, index, is_static);
3305   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
3306 
3307   // [jk] not needed currently
3308   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3309   //                                              Assembler::StoreStore));
3310 
3311   Label notVolatile, Done;
3312   __ movl(rdx, flags);
3313   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3314   __ andl(rdx, 0x1);
3315 
3316   // Check for volatile store
3317   __ testl(rdx, rdx);
3318   __ movl(flags2, flags);
3319   __ jcc(Assembler::zero, notVolatile);
3320 
3321   putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags, flags2);
3322   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3323                                                Assembler::StoreStore));
3324   __ jmp(Done);
3325   __ bind(notVolatile);
3326 
3327   putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags, flags2);
3328 
3329   __ bind(Done);
3330 }
3331 
3332 void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc,
3333                                               Register obj, Register off, Register flags, Register flags2) {
3334 
3335   // field addresses
3336   const Address field(obj, off, Address::times_1, 0*wordSize);
3337   NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3338 
3339   Label notByte, notBool, notInt, notShort, notChar,
3340         notLong, notFloat, notObj, notValueType;
3341   Label Done;
3342 
3343   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3344 
3345   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3346 
3347   assert(btos == 0, "change code, btos != 0");
3348   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
3349   __ jcc(Assembler::notZero, notByte);
3350 
3351   // btos
3352   {
3353     __ pop(btos);
3354     if (!is_static) pop_and_check_object(obj);
3355     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
3356     if (!is_static && rc == may_rewrite) {
3357       patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3358     }
3359     __ jmp(Done);
3360   }
3361 
3362   __ bind(notByte);
3363   __ cmpl(flags, ztos);
3364   __ jcc(Assembler::notEqual, notBool);
3365 
3366   // ztos
3367   {
3368     __ pop(ztos);
3369     if (!is_static) pop_and_check_object(obj);
3370     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
3371     if (!is_static && rc == may_rewrite) {
3372       patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
3373     }
3374     __ jmp(Done);
3375   }
3376 
3377   __ bind(notBool);
3378   __ cmpl(flags, atos);
3379   __ jcc(Assembler::notEqual, notObj);
3380 
3381   // atos
3382   {
3383     if (!EnableValhalla) {
3384       __ pop(atos);
3385       if (!is_static) pop_and_check_object(obj);
3386       // Store into the field
3387       do_oop_store(_masm, field, rax);
3388       if (!is_static && rc == may_rewrite) {
3389         patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3390       }
3391       __ jmp(Done);
3392     } else {
3393       __ pop(atos);
3394       if (is_static) {
3395         Label notFlattenable, notBuffered;
3396         __ test_field_is_not_flattenable(flags2, rscratch1, notFlattenable);
3397         __ null_check(rax);
3398         __ bind(notFlattenable);
3399         do_oop_store(_masm, field, rax);
3400         __ jmp(Done);
3401       } else {
3402         Label isFlattenable, isFlattened, notBuffered, notBuffered2, rewriteNotFlattenable, rewriteFlattenable;
3403         __ test_field_is_flattenable(flags2, rscratch1, isFlattenable);
3404         // Not flattenable case, covers not flattenable values and objects
3405         pop_and_check_object(obj);
3406         // Store into the field
3407         do_oop_store(_masm, field, rax);
3408         __ bind(rewriteNotFlattenable);
3409         if (rc == may_rewrite) {
3410           patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3411         }
3412         __ jmp(Done);
3413         // Implementation of the flattenable semantic
3414         __ bind(isFlattenable);
3415         __ null_check(rax);
3416         __ test_field_is_flattened(flags2, rscratch1, isFlattened);
3417         // Not flattened case
3418         pop_and_check_object(obj);
3419         // Store into the field
3420         do_oop_store(_masm, field, rax);
3421         __ jmp(rewriteFlattenable);
3422         __ bind(isFlattened);
3423         pop_and_check_object(obj);
3424         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value),
3425                 rax, off, obj);
3426         __ bind(rewriteFlattenable);
3427         if (rc == may_rewrite) {
3428           patch_bytecode(Bytecodes::_fast_qputfield, bc, rbx, true, byte_no);
3429         }
3430         __ jmp(Done);
3431       }
3432     }
3433   }
3434 
3435   __ bind(notObj);
3436   __ cmpl(flags, itos);
3437   __ jcc(Assembler::notEqual, notInt);
3438 
3439   // itos
3440   {
3441     __ pop(itos);
3442     if (!is_static) pop_and_check_object(obj);
3443     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
3444     if (!is_static && rc == may_rewrite) {
3445       patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3446     }
3447     __ jmp(Done);
3448   }
3449 
3450   __ bind(notInt);
3451   __ cmpl(flags, ctos);
3452   __ jcc(Assembler::notEqual, notChar);
3453 
3454   // ctos
3455   {
3456     __ pop(ctos);
3457     if (!is_static) pop_and_check_object(obj);
3458     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
3459     if (!is_static && rc == may_rewrite) {
3460       patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3461     }
3462     __ jmp(Done);
3463   }
3464 
3465   __ bind(notChar);
3466   __ cmpl(flags, stos);
3467   __ jcc(Assembler::notEqual, notShort);
3468 
3469   // stos
3470   {
3471     __ pop(stos);
3472     if (!is_static) pop_and_check_object(obj);
3473     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
3474     if (!is_static && rc == may_rewrite) {
3475       patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3476     }
3477     __ jmp(Done);
3478   }
3479 
3480   __ bind(notShort);
3481   __ cmpl(flags, ltos);
3482   __ jcc(Assembler::notEqual, notLong);
3483 
3484   // ltos
3485   {
3486     __ pop(ltos);
3487     if (!is_static) pop_and_check_object(obj);
3488     __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos*/, noreg, noreg);
3489 #ifdef _LP64
3490     if (!is_static && rc == may_rewrite) {
3491       patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3492     }
3493 #endif // _LP64
3494     __ jmp(Done);
3495   }
3496 
3497   __ bind(notLong);
3498   __ cmpl(flags, ftos);
3499   __ jcc(Assembler::notEqual, notFloat);
3500 
3501   // ftos
3502   {
3503     __ pop(ftos);
3504     if (!is_static) pop_and_check_object(obj);
3505     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
3506     if (!is_static && rc == may_rewrite) {
3507       patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3508     }
3509     __ jmp(Done);
3510   }
3511 
3512   __ bind(notFloat);
3513 #ifdef ASSERT
3514   Label notDouble;
3515   __ cmpl(flags, dtos);
3516   __ jcc(Assembler::notEqual, notDouble);
3517 #endif
3518 
3519   // dtos
3520   {
3521     __ pop(dtos);
3522     if (!is_static) pop_and_check_object(obj);
3523     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos */, noreg, noreg);
3524     if (!is_static && rc == may_rewrite) {
3525       patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3526     }
3527   }
3528 
3529 #ifdef ASSERT
3530   __ jmp(Done);
3531 
3532   __ bind(notDouble);
3533   __ stop("Bad state");
3534 #endif
3535 
3536   __ bind(Done);
3537 }
3538 
3539 void TemplateTable::putfield(int byte_no) {
3540   putfield_or_static(byte_no, false);
3541 }
3542 
3543 void TemplateTable::nofast_putfield(int byte_no) {
3544   putfield_or_static(byte_no, false, may_not_rewrite);
3545 }
3546 
3547 void TemplateTable::putstatic(int byte_no) {
3548   putfield_or_static(byte_no, true);
3549 }
3550 
3551 void TemplateTable::jvmti_post_fast_field_mod() {
3552 
3553   const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3554 
3555   if (JvmtiExport::can_post_field_modification()) {
3556     // Check to see if a field modification watch has been set before
3557     // we take the time to call into the VM.
3558     Label L2;
3559     __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3560     __ testl(scratch, scratch);
3561     __ jcc(Assembler::zero, L2);
3562     __ pop_ptr(rbx);                  // copy the object pointer from tos
3563     __ verify_oop(rbx);
3564     __ push_ptr(rbx);                 // put the object pointer back on tos
3565     // Save tos values before call_VM() clobbers them. Since we have
3566     // to do it for every data type, we use the saved values as the
3567     // jvalue object.
3568     switch (bytecode()) {          // load values into the jvalue object
3569     case Bytecodes::_fast_qputfield: //fall through
3570     case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3571     case Bytecodes::_fast_bputfield: // fall through
3572     case Bytecodes::_fast_zputfield: // fall through
3573     case Bytecodes::_fast_sputfield: // fall through
3574     case Bytecodes::_fast_cputfield: // fall through
3575     case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3576     case Bytecodes::_fast_dputfield: __ push(dtos); break;
3577     case Bytecodes::_fast_fputfield: __ push(ftos); break;
3578     case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3579 
3580     default:
3581       ShouldNotReachHere();
3582     }
3583     __ mov(scratch, rsp);             // points to jvalue on the stack
3584     // access constant pool cache entry
3585     LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1));
3586     NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1));
3587     __ verify_oop(rbx);
3588     // rbx: object pointer copied above
3589     // c_rarg2: cache entry pointer
3590     // c_rarg3: jvalue object on the stack
3591     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3592     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3593 
3594     switch (bytecode()) {             // restore tos values
3595     case Bytecodes::_fast_qputfield: // fall through
3596     case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3597     case Bytecodes::_fast_bputfield: // fall through
3598     case Bytecodes::_fast_zputfield: // fall through
3599     case Bytecodes::_fast_sputfield: // fall through
3600     case Bytecodes::_fast_cputfield: // fall through
3601     case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3602     case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3603     case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3604     case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3605     default: break;
3606     }
3607     __ bind(L2);
3608   }
3609 }
3610 
3611 void TemplateTable::fast_storefield(TosState state) {
3612   transition(state, vtos);
3613 
3614   ByteSize base = ConstantPoolCache::base_offset();
3615 
3616   jvmti_post_fast_field_mod();
3617 
3618   // access constant pool cache
3619   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3620 
3621   // test for volatile with rdx but rdx is tos register for lputfield.
3622   __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3623                        in_bytes(base +
3624                                 ConstantPoolCacheEntry::flags_offset())));
3625 
3626   // replace index with field offset from cache entry
3627   __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3628                          in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3629 
3630   // [jk] not needed currently
3631   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3632   //                                              Assembler::StoreStore));
3633 
3634   Label notVolatile, Done;
3635   if (bytecode() == Bytecodes::_fast_qputfield) {
3636     __ movl(rscratch2, rdx);
3637   }
3638 
3639   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3640   __ andl(rdx, 0x1);
3641 
3642   // Get object from stack
3643   pop_and_check_object(rcx);
3644 
3645   // field address
3646   const Address field(rcx, rbx, Address::times_1);
3647 
3648   // Check for volatile store
3649   __ testl(rdx, rdx);
3650   __ jcc(Assembler::zero, notVolatile);
3651 
3652   fast_storefield_helper(field, rax);
3653   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3654                                                Assembler::StoreStore));
3655   __ jmp(Done);
3656   __ bind(notVolatile);
3657 
3658   fast_storefield_helper(field, rax);
3659 
3660   __ bind(Done);
3661 }
3662 
3663 void TemplateTable::fast_storefield_helper(Address field, Register rax) {
3664 
3665   // access field
3666   switch (bytecode()) {
3667   case Bytecodes::_fast_qputfield:
3668     {
3669       Label isFlattened, done;
3670       __ null_check(rax);
3671       __ test_field_is_flattened(rscratch2, rscratch1, isFlattened);
3672       // No Flattened case
3673       do_oop_store(_masm, field, rax);
3674       __ jmp(done);
3675       __ bind(isFlattened);
3676       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value),
3677           rax, rbx, rcx);
3678       __ bind(done);
3679     }
3680     break;
3681   case Bytecodes::_fast_aputfield:
3682     {
3683       do_oop_store(_masm, field, rax);
3684     }
3685     break;
3686   case Bytecodes::_fast_lputfield:
3687 #ifdef _LP64
3688     __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
3689 #else
3690   __ stop("should not be rewritten");
3691 #endif
3692     break;
3693   case Bytecodes::_fast_iputfield:
3694     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
3695     break;
3696   case Bytecodes::_fast_zputfield:
3697     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
3698     break;
3699   case Bytecodes::_fast_bputfield:
3700     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
3701     break;
3702   case Bytecodes::_fast_sputfield:
3703     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
3704     break;
3705   case Bytecodes::_fast_cputfield:
3706     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
3707     break;
3708   case Bytecodes::_fast_fputfield:
3709     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos*/, noreg, noreg);
3710     break;
3711   case Bytecodes::_fast_dputfield:
3712     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos*/, noreg, noreg);
3713     break;
3714   default:
3715     ShouldNotReachHere();
3716   }
3717 }
3718 
3719 void TemplateTable::fast_accessfield(TosState state) {
3720   transition(atos, state);
3721 
3722   // Do the JVMTI work here to avoid disturbing the register state below
3723   if (JvmtiExport::can_post_field_access()) {
3724     // Check to see if a field access watch has been set before we
3725     // take the time to call into the VM.
3726     Label L1;
3727     __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3728     __ testl(rcx, rcx);
3729     __ jcc(Assembler::zero, L1);
3730     // access constant pool cache entry
3731     LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1));
3732     NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1));
3733     __ verify_oop(rax);
3734     __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
3735     LP64_ONLY(__ mov(c_rarg1, rax));
3736     // c_rarg1: object pointer copied above
3737     // c_rarg2: cache entry pointer
3738     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
3739     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
3740     __ pop_ptr(rax); // restore object pointer
3741     __ bind(L1);
3742   }
3743 
3744   // access constant pool cache
3745   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3746   // replace index with field offset from cache entry
3747   // [jk] not needed currently
3748   // __ movl(rdx, Address(rcx, rbx, Address::times_8,
3749   //                      in_bytes(ConstantPoolCache::base_offset() +
3750   //                               ConstantPoolCacheEntry::flags_offset())));
3751   // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3752   // __ andl(rdx, 0x1);
3753   //
3754   __ movptr(rdx, Address(rcx, rbx, Address::times_ptr,
3755                          in_bytes(ConstantPoolCache::base_offset() +
3756                                   ConstantPoolCacheEntry::f2_offset())));
3757 
3758   // rax: object
3759   __ verify_oop(rax);
3760   __ null_check(rax);
3761   Address field(rax, rdx, Address::times_1);
3762 
3763   // access field
3764   switch (bytecode()) {
3765   case Bytecodes::_fast_qgetfield:
3766     {
3767       Label isFlattened, nonnull, Done;
3768       __ movptr(rscratch1, Address(rcx, rbx, Address::times_ptr,
3769                                    in_bytes(ConstantPoolCache::base_offset() +
3770                                             ConstantPoolCacheEntry::flags_offset())));
3771       __ test_field_is_flattened(rscratch1, rscratch2, isFlattened);
3772         // Non-flattened field case
3773         __ movptr(rscratch1, rax);
3774         __ load_heap_oop(rax, field);
3775         __ testptr(rax, rax);
3776         __ jcc(Assembler::notZero, nonnull);
3777           __ movptr(rax, rscratch1);
3778           __ movl(rcx, Address(rcx, rbx, Address::times_ptr,
3779                              in_bytes(ConstantPoolCache::base_offset() +
3780                                       ConstantPoolCacheEntry::flags_offset())));
3781           __ andl(rcx, ConstantPoolCacheEntry::field_index_mask);
3782           __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field),
3783                      rax, rcx);
3784         __ bind(nonnull);
3785         __ verify_oop(rax);
3786         __ jmp(Done);
3787       __ bind(isFlattened);
3788         __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3789                            in_bytes(ConstantPoolCache::base_offset() +
3790                                     ConstantPoolCacheEntry::flags_offset())));
3791         __ andl(rdx, ConstantPoolCacheEntry::field_index_mask);
3792         __ movptr(rcx, Address(rcx, rbx, Address::times_ptr,
3793                                      in_bytes(ConstantPoolCache::base_offset() +
3794                                               ConstantPoolCacheEntry::f1_offset())));
3795         call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field),
3796                 rax, rdx, rcx);
3797         __ verify_oop(rax);
3798       __ bind(Done);
3799     }
3800     break;
3801   case Bytecodes::_fast_agetfield:
3802     do_oop_load(_masm, field, rax);
3803     __ verify_oop(rax);
3804     break;
3805   case Bytecodes::_fast_lgetfield:
3806 #ifdef _LP64
3807     __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
3808 #else
3809   __ stop("should not be rewritten");
3810 #endif
3811     break;
3812   case Bytecodes::_fast_igetfield:
3813     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3814     break;
3815   case Bytecodes::_fast_bgetfield:
3816     __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3817     break;
3818   case Bytecodes::_fast_sgetfield:
3819     __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3820     break;
3821   case Bytecodes::_fast_cgetfield:
3822     __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3823     break;
3824   case Bytecodes::_fast_fgetfield:
3825     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3826     break;
3827   case Bytecodes::_fast_dgetfield:
3828     __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3829     break;
3830   default:
3831     ShouldNotReachHere();
3832   }
3833   // [jk] not needed currently
3834   //   Label notVolatile;
3835   //   __ testl(rdx, rdx);
3836   //   __ jcc(Assembler::zero, notVolatile);
3837   //   __ membar(Assembler::LoadLoad);
3838   //   __ bind(notVolatile);
3839 }
3840 
3841 void TemplateTable::fast_xaccess(TosState state) {
3842   transition(vtos, state);
3843 
3844   // get receiver
3845   __ movptr(rax, aaddress(0));
3846   // access constant pool cache
3847   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
3848   __ movptr(rbx,
3849             Address(rcx, rdx, Address::times_ptr,
3850                     in_bytes(ConstantPoolCache::base_offset() +
3851                              ConstantPoolCacheEntry::f2_offset())));
3852   // make sure exception is reported in correct bcp range (getfield is
3853   // next instruction)
3854   __ increment(rbcp);
3855   __ null_check(rax);
3856   const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
3857   switch (state) {
3858   case itos:
3859     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3860     break;
3861   case atos:
3862     do_oop_load(_masm, field, rax);
3863     __ verify_oop(rax);
3864     break;
3865   case ftos:
3866     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3867     break;
3868   default:
3869     ShouldNotReachHere();
3870   }
3871 
3872   // [jk] not needed currently
3873   // Label notVolatile;
3874   // __ movl(rdx, Address(rcx, rdx, Address::times_8,
3875   //                      in_bytes(ConstantPoolCache::base_offset() +
3876   //                               ConstantPoolCacheEntry::flags_offset())));
3877   // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3878   // __ testl(rdx, 0x1);
3879   // __ jcc(Assembler::zero, notVolatile);
3880   // __ membar(Assembler::LoadLoad);
3881   // __ bind(notVolatile);
3882 
3883   __ decrement(rbcp);
3884 }
3885 
3886 //-----------------------------------------------------------------------------
3887 // Calls
3888 
3889 void TemplateTable::count_calls(Register method, Register temp) {
3890   // implemented elsewhere
3891   ShouldNotReachHere();
3892 }
3893 
3894 void TemplateTable::prepare_invoke(int byte_no,
3895                                    Register method,  // linked method (or i-klass)
3896                                    Register index,   // itable index, MethodType, etc.
3897                                    Register recv,    // if caller wants to see it
3898                                    Register flags    // if caller wants to test it
3899                                    ) {
3900   // determine flags
3901   const Bytecodes::Code code = bytecode();
3902   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3903   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3904   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3905   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3906   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3907   const bool load_receiver       = (recv  != noreg);
3908   const bool save_flags          = (flags != noreg);
3909   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3910   assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3911   assert(flags == noreg || flags == rdx, "");
3912   assert(recv  == noreg || recv  == rcx, "");
3913 
3914   // setup registers & access constant pool cache
3915   if (recv  == noreg)  recv  = rcx;
3916   if (flags == noreg)  flags = rdx;
3917   assert_different_registers(method, index, recv, flags);
3918 
3919   // save 'interpreter return address'
3920   __ save_bcp();
3921 
3922   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3923 
3924   // maybe push appendix to arguments (just before return address)
3925   if (is_invokedynamic || is_invokehandle) {
3926     Label L_no_push;
3927     __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
3928     __ jcc(Assembler::zero, L_no_push);
3929     // Push the appendix as a trailing parameter.
3930     // This must be done before we get the receiver,
3931     // since the parameter_size includes it.
3932     __ push(rbx);
3933     __ mov(rbx, index);
3934     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
3935     __ load_resolved_reference_at_index(index, rbx);
3936     __ pop(rbx);
3937     __ push(index);  // push appendix (MethodType, CallSite, etc.)
3938     __ bind(L_no_push);
3939   }
3940 
3941   // load receiver if needed (after appendix is pushed so parameter size is correct)
3942   // Note: no return address pushed yet
3943   if (load_receiver) {
3944     __ movl(recv, flags);
3945     __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
3946     const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
3947     const int receiver_is_at_end      = -1;  // back off one slot to get receiver
3948     Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3949     __ movptr(recv, recv_addr);
3950     __ verify_oop(recv);
3951   }
3952 
3953   if (save_flags) {
3954     __ movl(rbcp, flags);
3955   }
3956 
3957   // compute return type
3958   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3959   // Make sure we don't need to mask flags after the above shift
3960   ConstantPoolCacheEntry::verify_tos_state_shift();
3961   // load return address
3962   {
3963     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3964     ExternalAddress table(table_addr);
3965     LP64_ONLY(__ lea(rscratch1, table));
3966     LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr)));
3967     NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))));
3968   }
3969 
3970   // push return address
3971   __ push(flags);
3972 
3973   // Restore flags value from the constant pool cache, and restore rsi
3974   // for later null checks.  r13 is the bytecode pointer
3975   if (save_flags) {
3976     __ movl(flags, rbcp);
3977     __ restore_bcp();
3978   }
3979 }
3980 
3981 void TemplateTable::invokevirtual_helper(Register index,
3982                                          Register recv,
3983                                          Register flags) {
3984   // Uses temporary registers rax, rdx
3985   assert_different_registers(index, recv, rax, rdx);
3986   assert(index == rbx, "");
3987   assert(recv  == rcx, "");
3988 
3989   // Test for an invoke of a final method
3990   Label notFinal;
3991   __ movl(rax, flags);
3992   __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
3993   __ jcc(Assembler::zero, notFinal);
3994 
3995   const Register method = index;  // method must be rbx
3996   assert(method == rbx,
3997          "Method* must be rbx for interpreter calling convention");
3998 
3999   // do the call - the index is actually the method to call
4000   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
4001 
4002   // It's final, need a null check here!
4003   __ null_check(recv);
4004 
4005   // profile this call
4006   __ profile_final_call(rax);
4007   __ profile_arguments_type(rax, method, rbcp, true);
4008 
4009   __ jump_from_interpreted(method, rax);
4010 
4011   __ bind(notFinal);
4012 
4013   // get receiver klass
4014   __ null_check(recv, oopDesc::klass_offset_in_bytes());
4015   __ load_klass(rax, recv);
4016 
4017   // profile this call
4018   __ profile_virtual_call(rax, rlocals, rdx);
4019   // get target Method* & entry point
4020   __ lookup_virtual_method(rax, index, method);
4021   __ profile_called_method(method, rdx, rbcp);
4022 
4023   __ profile_arguments_type(rdx, method, rbcp, true);
4024   __ jump_from_interpreted(method, rdx);
4025 }
4026 
4027 void TemplateTable::invokevirtual(int byte_no) {
4028   transition(vtos, vtos);
4029   assert(byte_no == f2_byte, "use this argument");
4030   prepare_invoke(byte_no,
4031                  rbx,    // method or vtable index
4032                  noreg,  // unused itable index
4033                  rcx, rdx); // recv, flags
4034 
4035   // rbx: index
4036   // rcx: receiver
4037   // rdx: flags
4038 
4039   invokevirtual_helper(rbx, rcx, rdx);
4040 }
4041 
4042 void TemplateTable::invokespecial(int byte_no) {
4043   transition(vtos, vtos);
4044   assert(byte_no == f1_byte, "use this argument");
4045   prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
4046                  rcx);  // get receiver also for null check
4047   __ verify_oop(rcx);
4048   __ null_check(rcx);
4049   // do the call
4050   __ profile_call(rax);
4051   __ profile_arguments_type(rax, rbx, rbcp, false);
4052   __ jump_from_interpreted(rbx, rax);
4053 }
4054 
4055 void TemplateTable::invokestatic(int byte_no) {
4056   transition(vtos, vtos);
4057   assert(byte_no == f1_byte, "use this argument");
4058   prepare_invoke(byte_no, rbx);  // get f1 Method*
4059   // do the call
4060   __ profile_call(rax);
4061   __ profile_arguments_type(rax, rbx, rbcp, false);
4062   __ jump_from_interpreted(rbx, rax);
4063 }
4064 
4065 
4066 void TemplateTable::fast_invokevfinal(int byte_no) {
4067   transition(vtos, vtos);
4068   assert(byte_no == f2_byte, "use this argument");
4069   __ stop("fast_invokevfinal not used on x86");
4070 }
4071 
4072 
4073 void TemplateTable::invokeinterface(int byte_no) {
4074   transition(vtos, vtos);
4075   assert(byte_no == f1_byte, "use this argument");
4076   prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 Method*
4077                  rcx, rdx); // recv, flags
4078 
4079   // rax: reference klass (from f1) if interface method
4080   // rbx: method (from f2)
4081   // rcx: receiver
4082   // rdx: flags
4083 
4084   // First check for Object case, then private interface method,
4085   // then regular interface method.
4086 
4087   // Special case of invokeinterface called for virtual method of
4088   // java.lang.Object.  See cpCache.cpp for details.
4089   Label notObjectMethod;
4090   __ movl(rlocals, rdx);
4091   __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
4092   __ jcc(Assembler::zero, notObjectMethod);
4093   invokevirtual_helper(rbx, rcx, rdx);
4094   // no return from above
4095   __ bind(notObjectMethod);
4096 
4097   Label no_such_interface; // for receiver subtype check
4098   Register recvKlass; // used for exception processing
4099 
4100   // Check for private method invocation - indicated by vfinal
4101   Label notVFinal;
4102   __ movl(rlocals, rdx);
4103   __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
4104   __ jcc(Assembler::zero, notVFinal);
4105 
4106   // Get receiver klass into rlocals - also a null check
4107   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
4108   __ load_klass(rlocals, rcx);
4109 
4110   Label subtype;
4111   __ check_klass_subtype(rlocals, rax, rbcp, subtype);
4112   // If we get here the typecheck failed
4113   recvKlass = rdx;
4114   __ mov(recvKlass, rlocals); // shuffle receiver class for exception use
4115   __ jmp(no_such_interface);
4116 
4117   __ bind(subtype);
4118 
4119   // do the call - rbx is actually the method to call
4120 
4121   __ profile_final_call(rdx);
4122   __ profile_arguments_type(rdx, rbx, rbcp, true);
4123 
4124   __ jump_from_interpreted(rbx, rdx);
4125   // no return from above
4126   __ bind(notVFinal);
4127 
4128   // Get receiver klass into rdx - also a null check
4129   __ restore_locals();  // restore r14
4130   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
4131   __ load_klass(rdx, rcx);
4132 
4133   Label no_such_method;
4134 
4135   // Preserve method for throw_AbstractMethodErrorVerbose.
4136   __ mov(rcx, rbx);
4137   // Receiver subtype check against REFC.
4138   // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
4139   __ lookup_interface_method(// inputs: rec. class, interface, itable index
4140                              rdx, rax, noreg,
4141                              // outputs: scan temp. reg, scan temp. reg
4142                              rbcp, rlocals,
4143                              no_such_interface,
4144                              /*return_method=*/false);
4145 
4146   // profile this call
4147   __ restore_bcp(); // rbcp was destroyed by receiver type check
4148   __ profile_virtual_call(rdx, rbcp, rlocals);
4149 
4150   // Get declaring interface class from method, and itable index
4151   __ movptr(rax, Address(rbx, Method::const_offset()));
4152   __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
4153   __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
4154   __ movl(rbx, Address(rbx, Method::itable_index_offset()));
4155   __ subl(rbx, Method::itable_index_max);
4156   __ negl(rbx);
4157 
4158   // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
4159   __ mov(rlocals, rdx);
4160   __ lookup_interface_method(// inputs: rec. class, interface, itable index
4161                              rlocals, rax, rbx,
4162                              // outputs: method, scan temp. reg
4163                              rbx, rbcp,
4164                              no_such_interface);
4165 
4166   // rbx: Method* to call
4167   // rcx: receiver
4168   // Check for abstract method error
4169   // Note: This should be done more efficiently via a throw_abstract_method_error
4170   //       interpreter entry point and a conditional jump to it in case of a null
4171   //       method.
4172   __ testptr(rbx, rbx);
4173   __ jcc(Assembler::zero, no_such_method);
4174 
4175   __ profile_called_method(rbx, rbcp, rdx);
4176   __ profile_arguments_type(rdx, rbx, rbcp, true);
4177 
4178   // do the call
4179   // rcx: receiver
4180   // rbx,: Method*
4181   __ jump_from_interpreted(rbx, rdx);
4182   __ should_not_reach_here();
4183 
4184   // exception handling code follows...
4185   // note: must restore interpreter registers to canonical
4186   //       state for exception handling to work correctly!
4187 
4188   __ bind(no_such_method);
4189   // throw exception
4190   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
4191   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
4192   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
4193   // Pass arguments for generating a verbose error message.
4194 #ifdef _LP64
4195   recvKlass = c_rarg1;
4196   Register method    = c_rarg2;
4197   if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
4198   if (method != rcx)    { __ movq(method, rcx);    }
4199 #else
4200   recvKlass = rdx;
4201   Register method    = rcx;
4202 #endif
4203   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
4204              recvKlass, method);
4205   // The call_VM checks for exception, so we should never return here.
4206   __ should_not_reach_here();
4207 
4208   __ bind(no_such_interface);
4209   // throw exception
4210   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
4211   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
4212   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
4213   // Pass arguments for generating a verbose error message.
4214   LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
4215   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
4216              recvKlass, rax);
4217   // the call_VM checks for exception, so we should never return here.
4218   __ should_not_reach_here();
4219 }
4220 
4221 void TemplateTable::invokehandle(int byte_no) {
4222   transition(vtos, vtos);
4223   assert(byte_no == f1_byte, "use this argument");
4224   const Register rbx_method = rbx;
4225   const Register rax_mtype  = rax;
4226   const Register rcx_recv   = rcx;
4227   const Register rdx_flags  = rdx;
4228 
4229   prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
4230   __ verify_method_ptr(rbx_method);
4231   __ verify_oop(rcx_recv);
4232   __ null_check(rcx_recv);
4233 
4234   // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
4235   // rbx: MH.invokeExact_MT method (from f2)
4236 
4237   // Note:  rax_mtype is already pushed (if necessary) by prepare_invoke
4238 
4239   // FIXME: profile the LambdaForm also
4240   __ profile_final_call(rax);
4241   __ profile_arguments_type(rdx, rbx_method, rbcp, true);
4242 
4243   __ jump_from_interpreted(rbx_method, rdx);
4244 }
4245 
4246 void TemplateTable::invokedynamic(int byte_no) {
4247   transition(vtos, vtos);
4248   assert(byte_no == f1_byte, "use this argument");
4249 
4250   const Register rbx_method   = rbx;
4251   const Register rax_callsite = rax;
4252 
4253   prepare_invoke(byte_no, rbx_method, rax_callsite);
4254 
4255   // rax: CallSite object (from cpool->resolved_references[f1])
4256   // rbx: MH.linkToCallSite method (from f2)
4257 
4258   // Note:  rax_callsite is already pushed by prepare_invoke
4259 
4260   // %%% should make a type profile for any invokedynamic that takes a ref argument
4261   // profile this call
4262   __ profile_call(rbcp);
4263   __ profile_arguments_type(rdx, rbx_method, rbcp, false);
4264 
4265   __ verify_oop(rax_callsite);
4266 
4267   __ jump_from_interpreted(rbx_method, rdx);
4268 }
4269 
4270 //-----------------------------------------------------------------------------
4271 // Allocation
4272 
4273 void TemplateTable::_new() {
4274   transition(vtos, atos);
4275   __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
4276   Label slow_case;
4277   Label slow_case_no_pop;
4278   Label done;
4279   Label initialize_header;
4280   Label initialize_object;  // including clearing the fields
4281 
4282   __ get_cpool_and_tags(rcx, rax);
4283 
4284   // Make sure the class we're about to instantiate has been resolved.
4285   // This is done before loading InstanceKlass to be consistent with the order
4286   // how Constant Pool is updated (see ConstantPool::klass_at_put)
4287   const int tags_offset = Array<u1>::base_offset_in_bytes();
4288   __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
4289   __ jcc(Assembler::notEqual, slow_case_no_pop);
4290 
4291   // get InstanceKlass
4292   __ load_resolved_klass_at_index(rcx, rdx, rcx);
4293   __ push(rcx);  // save the contexts of klass for initializing the header
4294 
4295   // make sure klass is initialized & doesn't have finalizer
4296   // make sure klass is fully initialized
4297   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
4298   __ jcc(Assembler::notEqual, slow_case);
4299 
4300   // get instance_size in InstanceKlass (scaled to a count of bytes)
4301   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
4302   // test to see if it has a finalizer or is malformed in some way
4303   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
4304   __ jcc(Assembler::notZero, slow_case);
4305 
4306   // Allocate the instance:
4307   //  If TLAB is enabled:
4308   //    Try to allocate in the TLAB.
4309   //    If fails, go to the slow path.
4310   //  Else If inline contiguous allocations are enabled:
4311   //    Try to allocate in eden.
4312   //    If fails due to heap end, go to slow path.
4313   //
4314   //  If TLAB is enabled OR inline contiguous is enabled:
4315   //    Initialize the allocation.
4316   //    Exit.
4317   //
4318   //  Go to slow path.
4319 
4320   const bool allow_shared_alloc =
4321     Universe::heap()->supports_inline_contig_alloc();
4322 
4323   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
4324 #ifndef _LP64
4325   if (UseTLAB || allow_shared_alloc) {
4326     __ get_thread(thread);
4327   }
4328 #endif // _LP64
4329 
4330   if (UseTLAB) {
4331     __ tlab_allocate(thread, rax, rdx, 0, rcx, rbx, slow_case);
4332     if (ZeroTLAB) {
4333       // the fields have been already cleared
4334       __ jmp(initialize_header);
4335     } else {
4336       // initialize both the header and fields
4337       __ jmp(initialize_object);
4338     }
4339   } else {
4340     // Allocation in the shared Eden, if allowed.
4341     //
4342     // rdx: instance size in bytes
4343     __ eden_allocate(thread, rax, rdx, 0, rbx, slow_case);
4344   }
4345 
4346   // If UseTLAB or allow_shared_alloc are true, the object is created above and
4347   // there is an initialize need. Otherwise, skip and go to the slow path.
4348   if (UseTLAB || allow_shared_alloc) {
4349     // The object is initialized before the header.  If the object size is
4350     // zero, go directly to the header initialization.
4351     __ bind(initialize_object);
4352     __ decrement(rdx, sizeof(oopDesc));
4353     __ jcc(Assembler::zero, initialize_header);
4354 
4355     // Initialize topmost object field, divide rdx by 8, check if odd and
4356     // test if zero.
4357     __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
4358     __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
4359 
4360     // rdx must have been multiple of 8
4361 #ifdef ASSERT
4362     // make sure rdx was multiple of 8
4363     Label L;
4364     // Ignore partial flag stall after shrl() since it is debug VM
4365     __ jcc(Assembler::carryClear, L);
4366     __ stop("object size is not multiple of 2 - adjust this code");
4367     __ bind(L);
4368     // rdx must be > 0, no extra check needed here
4369 #endif
4370 
4371     // initialize remaining object fields: rdx was a multiple of 8
4372     { Label loop;
4373     __ bind(loop);
4374     __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
4375     NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
4376     __ decrement(rdx);
4377     __ jcc(Assembler::notZero, loop);
4378     }
4379 
4380     // initialize object header only.
4381     __ bind(initialize_header);
4382     if (UseBiasedLocking) {
4383       __ pop(rcx);   // get saved klass back in the register.
4384       __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
4385       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
4386     } else {
4387       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
4388                 (intptr_t)markOopDesc::prototype()); // header
4389       __ pop(rcx);   // get saved klass back in the register.
4390     }
4391 #ifdef _LP64
4392     __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
4393     __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
4394 #endif
4395     __ store_klass(rax, rcx);  // klass
4396 
4397     {
4398       SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
4399       // Trigger dtrace event for fastpath
4400       __ push(atos);
4401       __ call_VM_leaf(
4402            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
4403       __ pop(atos);
4404     }
4405 
4406     __ jmp(done);
4407   }
4408 
4409   // slow case
4410   __ bind(slow_case);
4411   __ pop(rcx);   // restore stack pointer to what it was when we came in.
4412   __ bind(slow_case_no_pop);
4413 
4414   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4415   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4416 
4417   __ get_constant_pool(rarg1);
4418   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4419   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
4420    __ verify_oop(rax);
4421 
4422   // continue
4423   __ bind(done);
4424 }
4425 
4426 void TemplateTable::defaultvalue() {
4427   transition(vtos, atos);
4428 
4429   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4430   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4431 
4432   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4433   __ get_constant_pool(rarg1);
4434 
4435   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::defaultvalue),
4436       rarg1, rarg2);
4437   __ verify_oop(rax);
4438 }
4439 
4440 void TemplateTable::newarray() {
4441   transition(itos, atos);
4442   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4443   __ load_unsigned_byte(rarg1, at_bcp(1));
4444   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
4445           rarg1, rax);
4446 }
4447 
4448 void TemplateTable::anewarray() {
4449   transition(itos, atos);
4450 
4451   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4452   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4453 
4454   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4455   __ get_constant_pool(rarg1);
4456   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
4457           rarg1, rarg2, rax);
4458 }
4459 
4460 void TemplateTable::arraylength() {
4461   transition(atos, itos);
4462   __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
4463   __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
4464 }
4465 
4466 void TemplateTable::checkcast() {
4467   transition(atos, atos);
4468   Label done, is_null, ok_is_subtype, quicked, resolved;
4469   __ testptr(rax, rax); // object is in rax
4470   __ jcc(Assembler::zero, is_null);
4471 
4472   // Get cpool & tags index
4473   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4474   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4475   // See if bytecode has already been quicked
4476   __ movzbl(rdx, Address(rdx, rbx,
4477       Address::times_1,
4478       Array<u1>::base_offset_in_bytes()));
4479   __ andl (rdx, ~JVM_CONSTANT_QDESC_BIT);
4480   __ cmpl(rdx, JVM_CONSTANT_Class);
4481   __ jcc(Assembler::equal, quicked);
4482   __ push(atos); // save receiver for result, and for GC
4483   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4484 
4485   // vm_result_2 has metadata result
4486 #ifndef _LP64
4487   // borrow rdi from locals
4488   __ get_thread(rdi);
4489   __ get_vm_result_2(rax, rdi);
4490   __ restore_locals();
4491 #else
4492   __ get_vm_result_2(rax, r15_thread);
4493 #endif
4494 
4495   __ pop_ptr(rdx); // restore receiver
4496   __ jmpb(resolved);
4497 
4498   // Get superklass in rax and subklass in rbx
4499   __ bind(quicked);
4500   __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
4501   __ load_resolved_klass_at_index(rcx, rbx, rax);
4502 
4503   __ bind(resolved);
4504   __ load_klass(rbx, rdx);
4505 
4506   // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
4507   // Superklass in rax.  Subklass in rbx.
4508   __ gen_subtype_check(rbx, ok_is_subtype);
4509 
4510   // Come here on failure
4511   __ push_ptr(rdx);
4512   // object is at TOS
4513   __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
4514 
4515   // Come here on success
4516   __ bind(ok_is_subtype);
4517   __ mov(rax, rdx); // Restore object in rdx
4518   __ jmp(done);
4519 
4520   __ bind(is_null);
4521 
4522   // Collect counts on whether this check-cast sees NULLs a lot or not.
4523   if (ProfileInterpreter) {
4524     __ profile_null_seen(rcx);
4525   }
4526 
4527   if (EnableValhalla) {
4528     // Get cpool & tags index
4529     __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4530     __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4531     // See if CP entry is a Q-descriptor
4532     __ movzbl(rcx, Address(rdx, rbx,
4533         Address::times_1,
4534         Array<u1>::base_offset_in_bytes()));
4535     __ andl (rcx, JVM_CONSTANT_QDESC_BIT);
4536     __ cmpl(rcx, JVM_CONSTANT_QDESC_BIT);
4537     __ jcc(Assembler::notEqual, done);
4538     __ jump(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
4539   }
4540 
4541   __ bind(done);
4542 }
4543 
4544 void TemplateTable::instanceof() {
4545   transition(atos, itos);
4546   Label done, is_null, ok_is_subtype, quicked, resolved;
4547   __ testptr(rax, rax);
4548   __ jcc(Assembler::zero, is_null);
4549 
4550   // Get cpool & tags index
4551   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4552   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4553   // See if bytecode has already been quicked
4554   __ movzbl(rdx, Address(rdx, rbx,
4555         Address::times_1,
4556         Array<u1>::base_offset_in_bytes()));
4557   __ andl (rdx, ~JVM_CONSTANT_QDESC_BIT);
4558   __ cmpl(rdx, JVM_CONSTANT_Class);
4559   __ jcc(Assembler::equal, quicked);
4560 
4561   __ push(atos); // save receiver for result, and for GC
4562   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4563   // vm_result_2 has metadata result
4564 
4565 #ifndef _LP64
4566   // borrow rdi from locals
4567   __ get_thread(rdi);
4568   __ get_vm_result_2(rax, rdi);
4569   __ restore_locals();
4570 #else
4571   __ get_vm_result_2(rax, r15_thread);
4572 #endif
4573 
4574   __ pop_ptr(rdx); // restore receiver
4575   __ verify_oop(rdx);
4576   __ load_klass(rdx, rdx);
4577   __ jmpb(resolved);
4578 
4579   // Get superklass in rax and subklass in rdx
4580   __ bind(quicked);
4581   __ load_klass(rdx, rax);
4582   __ load_resolved_klass_at_index(rcx, rbx, rax);
4583 
4584   __ bind(resolved);
4585 
4586   // Generate subtype check.  Blows rcx, rdi
4587   // Superklass in rax.  Subklass in rdx.
4588   __ gen_subtype_check(rdx, ok_is_subtype);
4589 
4590   // Come here on failure
4591   __ xorl(rax, rax);
4592   __ jmpb(done);
4593   // Come here on success
4594   __ bind(ok_is_subtype);
4595   __ movl(rax, 1);
4596 
4597   // Collect counts on whether this test sees NULLs a lot or not.
4598   if (ProfileInterpreter) {
4599     __ jmp(done);
4600     __ bind(is_null);
4601     __ profile_null_seen(rcx);
4602   } else {
4603     __ bind(is_null);   // same as 'done'
4604   }
4605   __ bind(done);
4606   // rax = 0: obj == NULL or  obj is not an instanceof the specified klass
4607   // rax = 1: obj != NULL and obj is     an instanceof the specified klass
4608 }
4609 
4610 //----------------------------------------------------------------------------------------------------
4611 // Breakpoints
4612 void TemplateTable::_breakpoint() {
4613   // Note: We get here even if we are single stepping..
4614   // jbug insists on setting breakpoints at every bytecode
4615   // even if we are in single step mode.
4616 
4617   transition(vtos, vtos);
4618 
4619   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4620 
4621   // get the unpatched byte code
4622   __ get_method(rarg);
4623   __ call_VM(noreg,
4624              CAST_FROM_FN_PTR(address,
4625                               InterpreterRuntime::get_original_bytecode_at),
4626              rarg, rbcp);
4627   __ mov(rbx, rax);  // why?
4628 
4629   // post the breakpoint event
4630   __ get_method(rarg);
4631   __ call_VM(noreg,
4632              CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4633              rarg, rbcp);
4634 
4635   // complete the execution of original bytecode
4636   __ dispatch_only_normal(vtos);
4637 }
4638 
4639 //-----------------------------------------------------------------------------
4640 // Exceptions
4641 
4642 void TemplateTable::athrow() {
4643   transition(atos, vtos);
4644   __ null_check(rax);
4645   __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
4646 }
4647 
4648 //-----------------------------------------------------------------------------
4649 // Synchronization
4650 //
4651 // Note: monitorenter & exit are symmetric routines; which is reflected
4652 //       in the assembly code structure as well
4653 //
4654 // Stack layout:
4655 //
4656 // [expressions  ] <--- rsp               = expression stack top
4657 // ..
4658 // [expressions  ]
4659 // [monitor entry] <--- monitor block top = expression stack bot
4660 // ..
4661 // [monitor entry]
4662 // [frame data   ] <--- monitor block bot
4663 // ...
4664 // [saved rbp    ] <--- rbp
4665 void TemplateTable::monitorenter() {
4666   transition(atos, vtos);
4667 
4668   // check for NULL object
4669   __ null_check(rax);
4670 
4671   __ resolve(IS_NOT_NULL, rax);
4672 
4673   const Address monitor_block_top(
4674         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4675   const Address monitor_block_bot(
4676         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4677   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4678 
4679   Label allocated;
4680 
4681   Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
4682   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4683   Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4684 
4685   // initialize entry pointer
4686   __ xorl(rmon, rmon); // points to free slot or NULL
4687 
4688   // find a free slot in the monitor block (result in rmon)
4689   {
4690     Label entry, loop, exit;
4691     __ movptr(rtop, monitor_block_top); // points to current entry,
4692                                         // starting with top-most entry
4693     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4694                                         // of monitor block
4695     __ jmpb(entry);
4696 
4697     __ bind(loop);
4698     // check if current entry is used
4699     __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
4700     // if not used then remember entry in rmon
4701     __ cmovptr(Assembler::equal, rmon, rtop);   // cmov => cmovptr
4702     // check if current entry is for same object
4703     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4704     // if same object then stop searching
4705     __ jccb(Assembler::equal, exit);
4706     // otherwise advance to next entry
4707     __ addptr(rtop, entry_size);
4708     __ bind(entry);
4709     // check if bottom reached
4710     __ cmpptr(rtop, rbot);
4711     // if not at bottom then check this entry
4712     __ jcc(Assembler::notEqual, loop);
4713     __ bind(exit);
4714   }
4715 
4716   __ testptr(rmon, rmon); // check if a slot has been found
4717   __ jcc(Assembler::notZero, allocated); // if found, continue with that one
4718 
4719   // allocate one if there's no free slot
4720   {
4721     Label entry, loop;
4722     // 1. compute new pointers          // rsp: old expression stack top
4723     __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
4724     __ subptr(rsp, entry_size);         // move expression stack top
4725     __ subptr(rmon, entry_size);        // move expression stack bottom
4726     __ mov(rtop, rsp);                  // set start value for copy loop
4727     __ movptr(monitor_block_bot, rmon); // set new monitor block bottom
4728     __ jmp(entry);
4729     // 2. move expression stack contents
4730     __ bind(loop);
4731     __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
4732                                                 // word from old location
4733     __ movptr(Address(rtop, 0), rbot);          // and store it at new location
4734     __ addptr(rtop, wordSize);                  // advance to next word
4735     __ bind(entry);
4736     __ cmpptr(rtop, rmon);                      // check if bottom reached
4737     __ jcc(Assembler::notEqual, loop);          // if not at bottom then
4738                                                 // copy next word
4739   }
4740 
4741   // call run-time routine
4742   // rmon: points to monitor entry
4743   __ bind(allocated);
4744 
4745   // Increment bcp to point to the next bytecode, so exception
4746   // handling for async. exceptions work correctly.
4747   // The object has already been poped from the stack, so the
4748   // expression stack looks correct.
4749   __ increment(rbcp);
4750 
4751   // store object
4752   __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
4753   __ lock_object(rmon);
4754 
4755   // check to make sure this monitor doesn't cause stack overflow after locking
4756   __ save_bcp();  // in case of exception
4757   __ generate_stack_overflow_check(0);
4758 
4759   // The bcp has already been incremented. Just need to dispatch to
4760   // next instruction.
4761   __ dispatch_next(vtos);
4762 }
4763 
4764 void TemplateTable::monitorexit() {
4765   transition(atos, vtos);
4766 
4767   // check for NULL object
4768   __ null_check(rax);
4769 
4770   __ resolve(IS_NOT_NULL, rax);
4771 
4772   const Address monitor_block_top(
4773         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4774   const Address monitor_block_bot(
4775         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4776   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4777 
4778   Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4779   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4780 
4781   Label found;
4782 
4783   // find matching slot
4784   {
4785     Label entry, loop;
4786     __ movptr(rtop, monitor_block_top); // points to current entry,
4787                                         // starting with top-most entry
4788     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4789                                         // of monitor block
4790     __ jmpb(entry);
4791 
4792     __ bind(loop);
4793     // check if current entry is for same object
4794     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4795     // if same object then stop searching
4796     __ jcc(Assembler::equal, found);
4797     // otherwise advance to next entry
4798     __ addptr(rtop, entry_size);
4799     __ bind(entry);
4800     // check if bottom reached
4801     __ cmpptr(rtop, rbot);
4802     // if not at bottom then check this entry
4803     __ jcc(Assembler::notEqual, loop);
4804   }
4805 
4806   // error handling. Unlocking was not block-structured
4807   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4808                    InterpreterRuntime::throw_illegal_monitor_state_exception));
4809   __ should_not_reach_here();
4810 
4811   // call run-time routine
4812   __ bind(found);
4813   __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
4814   __ unlock_object(rtop);
4815   __ pop_ptr(rax); // discard object
4816 }
4817 
4818 // Wide instructions
4819 void TemplateTable::wide() {
4820   transition(vtos, vtos);
4821   __ load_unsigned_byte(rbx, at_bcp(1));
4822   ExternalAddress wtable((address)Interpreter::_wentry_point);
4823   __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
4824   // Note: the rbcp increment step is part of the individual wide bytecode implementations
4825 }
4826 
4827 // Multi arrays
4828 void TemplateTable::multianewarray() {
4829   transition(vtos, atos);
4830 
4831   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4832   __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
4833   // last dim is on top of stack; we want address of first one:
4834   // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize
4835   // the latter wordSize to point to the beginning of the array.
4836   __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
4837   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
4838   __ load_unsigned_byte(rbx, at_bcp(3));
4839   __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
4840 }