/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.hpp"
#include "compiler/disassembler.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.hpp"
#include "oops/methodData.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/frame.inline.hpp"
#include "runtime/safepointMechanism.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->

// Global Register Names
static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
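// rbcp holds the interpreter's bytecode pointer and rlocals the base of the
// current frame's local-variable area. On 32-bit x86 they alias rsi/rdi,
// which is why some code below borrows and later restores those registers.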

// Platform-dependent initialization
void TemplateTable::pd_initialize() {
  // No x86 specific initialization
}

// Address Computation: local variables
static inline Address iaddress(int n) {
  return Address(rlocals, Interpreter::local_offset_in_bytes(n));
}

static inline Address laddress(int n) {
  return iaddress(n + 1);
}

#ifndef _LP64
static inline Address haddress(int n) {
  return iaddress(n + 0);
}
#endif

static inline Address faddress(int n) {
  return iaddress(n);
}

static inline Address daddress(int n) {
  return laddress(n);
}

static inline Address aaddress(int n) {
  return iaddress(n);
}

static inline Address iaddress(Register r) {
  return Address(rlocals, r, Address::times_ptr);
}

static inline Address laddress(Register r) {
  return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
}

#ifndef _LP64
static inline Address haddress(Register r)       {
  return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
}
#endif

static inline Address faddress(Register r) {
  return iaddress(r);
}

static inline Address daddress(Register r) {
  return laddress(r);
}

static inline Address aaddress(Register r) {
  return iaddress(r);
}


// expression stack
// (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
// data beyond the rsp which is potentially unsafe in an MT environment;
// an interrupt may overwrite that data.)
static inline Address at_rsp   () {
  return Address(rsp, 0);
}
// At the top of the Java expression stack, which may be different from rsp;
// it isn't for category 1 values.
static inline Address at_tos   () {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(0));
}

static inline Address at_tos_p1() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(1));
}

static inline Address at_tos_p2() {
  return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
}

// Condition conversion
static Assembler::Condition j_not(TemplateTable::Condition cc) {
  switch (cc) {
  case TemplateTable::equal        : return Assembler::notEqual;
  case TemplateTable::not_equal    : return Assembler::equal;
  case TemplateTable::less         : return Assembler::greaterEqual;
  case TemplateTable::less_equal   : return Assembler::greater;
  case TemplateTable::greater      : return Assembler::lessEqual;
  case TemplateTable::greater_equal: return Assembler::less;
  }
  ShouldNotReachHere();
  return Assembler::zero;
}
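// j_not is used by the branch templates, which fall through into the
// "taken" code and jump around it on the negated condition, e.g.
// __ jcc(j_not(cc), not_taken) for an if_icmp<cc> bytecode.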



// Miscellaneous helper routines
// Store an oop (or NULL) at the address described by dst.
// If val == noreg this means store a NULL.


static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Address dst,
                         Register val,
                         DecoratorSet decorators = 0) {
  assert(val == noreg || val == rax, "parameter is just for looks");
  __ store_heap_oop(dst, val, rdx, rbx, noreg, decorators);
}
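// Note: the extra registers passed here and in do_oop_load below (rdx, rbx)
// are handed to the GC barrier code behind store_heap_oop/load_heap_oop as
// temporaries and may be clobbered by it.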

static void do_oop_load(InterpreterMacroAssembler* _masm,
                        Address src,
                        Register dst,
                        DecoratorSet decorators = 0) {
  __ load_heap_oop(dst, src, rdx, rbx, decorators);
}

Address TemplateTable::at_bcp(int offset) {
  assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
  return Address(rbcp, offset);
}


void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
                                   Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
                                   int byte_no) {
  if (!RewriteBytecodes)  return;
  Label L_patch_done;

  switch (bc) {
  case Bytecodes::_fast_qputfield:
  case Bytecodes::_fast_aputfield:
  case Bytecodes::_fast_bputfield:
  case Bytecodes::_fast_zputfield:
  case Bytecodes::_fast_cputfield:
  case Bytecodes::_fast_dputfield:
  case Bytecodes::_fast_fputfield:
  case Bytecodes::_fast_iputfield:
  case Bytecodes::_fast_lputfield:
  case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
      __ movl(bc_reg, bc);
      __ cmpl(temp_reg, (int) 0);
      __ jcc(Assembler::zero, L_patch_done);  // don't patch
    }
    break;
  default:
    assert(byte_no == -1, "sanity");
    // the pair bytecodes have already done the load.
    if (load_bc_into_bc_reg) {
      __ movl(bc_reg, bc);
    }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    // if a breakpoint is present we can't rewrite the stream directly
    __ movzbl(temp_reg, at_bcp(0));
    __ cmpl(temp_reg, Bytecodes::_breakpoint);
    __ jcc(Assembler::notEqual, L_fast_patch);
    __ get_method(temp_reg);
    // Let breakpoint table handling rewrite to quicker bytecode
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
#ifndef ASSERT
    __ jmpb(L_patch_done);
#else
    __ jmp(L_patch_done);
#endif
    __ bind(L_fast_patch);
  }

#ifdef ASSERT
  Label L_okay;
  __ load_unsigned_byte(temp_reg, at_bcp(0));
  __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
  __ jcc(Assembler::equal, L_okay);
  __ cmpl(temp_reg, bc_reg);
  __ jcc(Assembler::equal, L_okay);
  __ stop("patching the wrong bytecode");
  __ bind(L_okay);
#endif

  // patch bytecode
  __ movb(at_bcp(0), bc_reg);
  __ bind(L_patch_done);
}
// Individual instructions


void TemplateTable::nop() {
  transition(vtos, vtos);
  // nothing to do
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ xorl(rax, rax);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  if (value == 0) {
    __ xorl(rax, rax);
  } else {
    __ movl(rax, value);
  }
#ifndef _LP64
  assert(value >= 0, "check this code");
  __ xorptr(rdx, rdx);
#endif
}



void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  if (UseSSE >= 1) {
    static float one = 1.0f, two = 2.0f;
    switch (value) {
    case 0:
      __ xorps(xmm0, xmm0);
      break;
    case 1:
      __ movflt(xmm0, ExternalAddress((address) &one));
      break;
    case 2:
      __ movflt(xmm0, ExternalAddress((address) &two));
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
           if (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // TODO: load 2.0f directly rather than computing 1.0f + 1.0f
    } else                 { ShouldNotReachHere();
    }
#endif // _LP64
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  if (UseSSE >= 2) {
    static double one = 1.0;
    switch (value) {
    case 0:
      __ xorpd(xmm0, xmm0);
      break;
    case 1:
      __ movdbl(xmm0, ExternalAddress((address) &one));
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
           if (value == 0) { __ fldz();
    } else if (value == 1) { __ fld1();
    } else                 { ShouldNotReachHere();
    }
#endif
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ load_signed_byte(rax, at_bcp(1));
}

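// sipush pushes a 16-bit big-endian immediate. load_unsigned_short reads the
// two operand bytes as a little-endian 16-bit value (bytes 0x12 0x34 yield
// 0x3412), bswapl moves them, byte-reversed, into the high half (0x12340000),
// and the arithmetic right shift by 16 restores the big-endian value while
// sign-extending it (0x00001234; bytes 0xFF 0xFE would give -2).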
void TemplateTable::sipush() {
  transition(vtos, itos);
  __ load_unsigned_short(rax, at_bcp(1));
  __ bswapl(rax);
  __ sarl(rax, 16);
}

void TemplateTable::ldc(bool wide) {
  transition(vtos, vtos);
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  Label call_ldc, notFloat, notClass, notInt, Done;

  if (wide) {
    __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
  } else {
    __ load_unsigned_byte(rbx, at_bcp(1));
  }

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
  __ andl(rdx, ~JVM_CONSTANT_QDescBit);

  // unresolved class - get the resolved class
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
  __ jccb(Assembler::equal, call_ldc);

  // unresolved class in error state - call into runtime to throw the error
  // from the first resolution attempt
  __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
  __ jccb(Assembler::equal, call_ldc);

  // resolved class - need to call vm to get java mirror of the class
  __ cmpl(rdx, JVM_CONSTANT_Class);
  __ jcc(Assembler::notEqual, notClass);

  __ bind(call_ldc);

  __ movl(rarg, wide);
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);

  __ push(atos);
  __ jmp(Done);

  __ bind(notClass);
  __ cmpl(rdx, JVM_CONSTANT_Float);
  __ jccb(Assembler::notEqual, notFloat);

  // ftos
  __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(ftos);
  __ jmp(Done);

  __ bind(notFloat);
  __ cmpl(rdx, JVM_CONSTANT_Integer);
  __ jccb(Assembler::notEqual, notInt);

  // itos
  __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(itos);
  __ jmp(Done);

  // assume the tag is for condy; if not, the VM runtime will tell us
  __ bind(notInt);
  condy_helper(Done);

  __ bind(Done);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  Register result = rax;
  Register tmp = rdx;
  Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  int index_size = wide ? sizeof(u2) : sizeof(u1);

  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (String, MethodType, etc.)
  assert_different_registers(result, tmp);
  __ get_cache_index_at_bcp(tmp, 1, index_size);
  __ load_resolved_reference_at_index(result, tmp);
  __ testptr(result, result);
  __ jcc(Assembler::notZero, resolved);

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // first time invocation - must resolve first
  __ movl(rarg, (int)bytecode());
  __ call_VM(result, entry, rarg);
  __ bind(resolved);

  { // Check for the null sentinel.
    // If we just called the VM, it already did the mapping for us,
    // but it's harmless to retry.
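    // (A null cache entry means "not yet resolved", so a constant that
    // actually resolves to null is cached as the sentinel object and
    // mapped back to NULL here.)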
    Label notNull;
    ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
    __ movptr(tmp, null_sentinel);
    __ cmpoop(tmp, result);
    __ jccb(Assembler::notEqual, notNull);
    __ xorptr(result, result);  // NULL object reference
    __ bind(notNull);
  }

  if (VerifyOops) {
    __ verify_oop(result);
  }
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label notDouble, notLong, Done;
  __ get_unsigned_2_byte_index_at_bcp(rbx, 1);

  __ get_cpool_and_tags(rcx, rax);
  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // get type
  __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
  __ cmpl(rdx, JVM_CONSTANT_Double);
  __ jccb(Assembler::notEqual, notDouble);

  // dtos
  __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
  __ push(dtos);

  __ jmp(Done);
  __ bind(notDouble);
  __ cmpl(rdx, JVM_CONSTANT_Long);
  __ jccb(Assembler::notEqual, notLong);

  // ltos
  __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
  NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
  __ push(ltos);
  __ jmp(Done);

  __ bind(notLong);
  condy_helper(Done);

  __ bind(Done);
}

void TemplateTable::condy_helper(Label& Done) {
  const Register obj = rax;
  const Register off = rbx;
  const Register flags = rcx;
  const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
  __ movl(rarg, (int)bytecode());
  call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(flags, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(flags, r15_thread);
#endif
  // VMr = obj = base address to find primitive value to push
  // VMr2 = flags = (tos, off) using format of CPCE::_flags
  __ movl(off, flags);
  __ andl(off, ConstantPoolCacheEntry::field_index_mask);
  const Address field(obj, off, Address::times_1, 0*wordSize);

  // What sort of thing are we loading?
  __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
  __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);

  switch (bytecode()) {
  case Bytecodes::_ldc:
  case Bytecodes::_ldc_w:
    {
      // tos in (itos, ftos, stos, btos, ctos, ztos)
      Label notInt, notFloat, notShort, notByte, notChar, notBool;
      __ cmpl(flags, itos);
      __ jcc(Assembler::notEqual, notInt);
      // itos
      __ movl(rax, field);
      __ push(itos);
      __ jmp(Done);

      __ bind(notInt);
      __ cmpl(flags, ftos);
      __ jcc(Assembler::notEqual, notFloat);
      // ftos
      __ load_float(field);
      __ push(ftos);
      __ jmp(Done);

      __ bind(notFloat);
      __ cmpl(flags, stos);
      __ jcc(Assembler::notEqual, notShort);
      // stos
      __ load_signed_short(rax, field);
      __ push(stos);
      __ jmp(Done);

      __ bind(notShort);
      __ cmpl(flags, btos);
      __ jcc(Assembler::notEqual, notByte);
      // btos
      __ load_signed_byte(rax, field);
      __ push(btos);
      __ jmp(Done);

      __ bind(notByte);
      __ cmpl(flags, ctos);
      __ jcc(Assembler::notEqual, notChar);
      // ctos
      __ load_unsigned_short(rax, field);
      __ push(ctos);
      __ jmp(Done);

      __ bind(notChar);
      __ cmpl(flags, ztos);
      __ jcc(Assembler::notEqual, notBool);
      // ztos
      __ load_signed_byte(rax, field);
      __ push(ztos);
      __ jmp(Done);

      __ bind(notBool);
      break;
    }

  case Bytecodes::_ldc2_w:
    {
      Label notLong, notDouble;
      __ cmpl(flags, ltos);
      __ jcc(Assembler::notEqual, notLong);
      // ltos
      // Loading high word first because movptr clobbers rax
      NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
      __ movptr(rax, field);
      __ push(ltos);
      __ jmp(Done);

      __ bind(notLong);
      __ cmpl(flags, dtos);
      __ jcc(Assembler::notEqual, notDouble);
      // dtos
      __ load_double(field);
      __ push(dtos);
      __ jmp(Done);

      __ bind(notDouble);
      break;
    }

  default:
    ShouldNotReachHere();
  }

  __ stop("bad ldc/condy");
}

void TemplateTable::locals_index(Register reg, int offset) {
  __ load_unsigned_byte(reg, at_bcp(offset));
  __ negptr(reg);
}
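// The index is negated because locals live at decreasing addresses below
// rlocals: with the scaled addressing in iaddress(Register) above, local n
// resolves to rlocals - n * wordSize.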

void TemplateTable::iload() {
  iload_internal();
}

void TemplateTable::nofast_iload() {
  iload_internal(may_not_rewrite);
}

void TemplateTable::iload_internal(RewriteControl rc) {
  transition(vtos, itos);
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;
    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx,
                          at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
    // If the next bytecode is _iload, wait to rewrite to _fast_iload2: we
    // only want to rewrite the last two iloads in a pair. Seeing _fast_iload
    // there means that bytecode's own successor was neither an iload nor a
    // caload, so the current and next bytecodes form a complete iload pair.
    __ cmpl(rbx, Bytecodes::_iload);
    __ jcc(Assembler::equal, done);

    __ cmpl(rbx, Bytecodes::_fast_iload);
    __ movl(bc, Bytecodes::_fast_iload2);

    __ jccb(Assembler::equal, rewrite);

    // if _caload, rewrite to fast_icaload
    __ cmpl(rbx, Bytecodes::_caload);
    __ movl(bc, Bytecodes::_fast_icaload);
    __ jccb(Assembler::equal, rewrite);

    // rewrite so iload doesn't check again.
    __ movl(bc, Bytecodes::_fast_iload);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_iload, bc, rbx, false);
    __ bind(done);
  }

  // Get the local value into tos
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

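// fast_iload2 replaces the first opcode of an iload/iload pair, so the
// operand layout is [fast_iload2][index1][iload][index2]: the first index is
// at bcp offset 1 and the second at offset 3, skipping the untouched second
// iload opcode.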
void TemplateTable::fast_iload2() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
  __ push(itos);
  locals_index(rbx, 3);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::lload() {
  transition(vtos, ltos);
  locals_index(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::fload() {
  transition(vtos, ftos);
  locals_index(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::dload() {
  transition(vtos, dtos);
  locals_index(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::aload() {
  transition(vtos, atos);
  locals_index(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::locals_index_wide(Register reg) {
  __ load_unsigned_short(reg, at_bcp(2));
  __ bswapl(reg);
  __ shrl(reg, 16);
  __ negptr(reg);
}
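// Same big-endian fix-up as sipush, but with a logical (shrl) rather than an
// arithmetic shift: a wide local index is an unsigned u2 and must be
// zero-extended before it is negated.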

void TemplateTable::wide_iload() {
  transition(vtos, itos);
  locals_index_wide(rbx);
  __ movl(rax, iaddress(rbx));
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);
  locals_index_wide(rbx);
  __ movptr(rax, laddress(rbx));
  NOT_LP64(__ movl(rdx, haddress(rbx)));
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);
  locals_index_wide(rbx);
  __ load_float(faddress(rbx));
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);
  locals_index_wide(rbx);
  __ load_double(daddress(rbx));
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);
  locals_index_wide(rbx);
  __ movptr(rax, aaddress(rbx));
}

void TemplateTable::index_check(Register array, Register index) {
  // Pop ptr into array
  __ pop_ptr(array);
  index_check_without_pop(array, index);
}

void TemplateTable::index_check_without_pop(Register array, Register index) {
  // destroys rbx
  // check array
  __ null_check(array, arrayOopDesc::length_offset_in_bytes());
  // sign extend index for use by indexed load
  __ movl2ptr(index, index);
  // check index
  __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
  if (index != rbx) {
    // ??? convention: move aberrant index into rbx for exception message
    assert(rbx != array, "different registers");
    __ movl(rbx, index);
  }
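  // A single unsigned compare covers both bounds: a negative index wraps to
  // a huge unsigned value, so "below" holds only for 0 <= index < length.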
  Label skip;
  __ jccb(Assembler::below, skip);
  // Pass array to create more detailed exceptions.
  __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
  __ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
  __ bind(skip);
}

void TemplateTable::iaload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_INT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_INT)),
                    noreg, noreg);
}

void TemplateTable::laload() {
  transition(itos, ltos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  NOT_LP64(__ mov(rbx, rax));
  // rbx: index
  __ access_load_at(T_LONG, IN_HEAP | IS_ARRAY, noreg /* ltos */,
                    Address(rdx, rbx, Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_LONG)),
                    noreg, noreg);
}



void TemplateTable::faload() {
  transition(itos, ftos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_FLOAT, IN_HEAP | IS_ARRAY, noreg /* ftos */,
                    Address(rdx, rax,
                            Address::times_4,
                            arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                    noreg, noreg);
}

void TemplateTable::daload() {
  transition(itos, dtos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_DOUBLE, IN_HEAP | IS_ARRAY, noreg /* dtos */,
                    Address(rdx, rax,
                            Address::times_8,
                            arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                    noreg, noreg);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  Register array = rcx;
  Register index = rax;

  index_check(array, index); // kills rbx
  if (ValueArrayFlatten) {
    Label is_flat_array, done;
    __ test_flattened_array_oop(array, rbx, is_flat_array);
    do_oop_load(_masm,
                Address(array, index,
                        UseCompressedOops ? Address::times_4 : Address::times_ptr,
                        arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
                rax,
                IS_ARRAY);
    __ jmp(done);
    __ bind(is_flat_array);
    __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), array, index);
    __ bind(done);
  } else {
    do_oop_load(_masm,
                Address(array, index,
                        UseCompressedOops ? Address::times_4 : Address::times_ptr,
                        arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
                rax,
                IS_ARRAY);
  }
}

void TemplateTable::baload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_BYTE, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                    noreg, noreg);
}

void TemplateTable::caload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg, noreg);
}

// iload followed by caload frequent pair
void TemplateTable::fast_icaload() {
  transition(vtos, itos);
  // load index out of locals
  locals_index(rbx);
  __ movl(rax, iaddress(rbx));

  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_CHAR, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                    noreg, noreg);
}


void TemplateTable::saload() {
  transition(itos, itos);
  // rax: index
  // rdx: array
  index_check(rdx, rax); // kills rbx
  __ access_load_at(T_SHORT, IN_HEAP | IS_ARRAY, rax,
                    Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)),
                    noreg, noreg);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);
  __ movl(rax, iaddress(n));
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);
  __ movptr(rax, laddress(n));
  NOT_LP64(__ movptr(rdx, haddress(n)));
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);
  __ load_float(faddress(n));
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);
  __ load_double(daddress(n));
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);
  __ movptr(rax, aaddress(n));
}

void TemplateTable::aload_0() {
  aload_0_internal();
}

void TemplateTable::nofast_aload_0() {
  aload_0_internal(may_not_rewrite);
}

void TemplateTable::aload_0_internal(RewriteControl rc) {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These pairs require only a small amount of code, which makes them
  // the most profitable to rewrite.
  if (RewriteFrequentPairs && rc == may_rewrite) {
    Label rewrite, done;

    const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
    LP64_ONLY(assert(rbx != bc, "register damaged"));

    // get next byte
    __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));

    // if _getfield then wait with rewrite
    __ cmpl(rbx, Bytecodes::_getfield);
    __ jcc(Assembler::equal, done);

    // if _igetfield then rewrite to _fast_iaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_igetfield);
    __ movl(bc, Bytecodes::_fast_iaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _agetfield then rewrite to _fast_aaccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_agetfield);
    __ movl(bc, Bytecodes::_fast_aaccess_0);
    __ jccb(Assembler::equal, rewrite);

    // if _fgetfield then rewrite to _fast_faccess_0
    assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ cmpl(rbx, Bytecodes::_fast_fgetfield);
    __ movl(bc, Bytecodes::_fast_faccess_0);
    __ jccb(Assembler::equal, rewrite);

    // else rewrite to _fast_aload0
    assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
    __ movl(bc, Bytecodes::_fast_aload_0);

    // rewrite
    // bc: fast bytecode
    __ bind(rewrite);
    patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);

    __ bind(done);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);
  locals_index(rbx);
  __ movl(iaddress(rbx), rax);
}


void TemplateTable::lstore() {
  transition(ltos, vtos);
  locals_index(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movptr(haddress(rbx), rdx));
}

void TemplateTable::fstore() {
  transition(ftos, vtos);
  locals_index(rbx);
  __ store_float(faddress(rbx));
}

void TemplateTable::dstore() {
  transition(dtos, vtos);
  locals_index(rbx);
  __ store_double(daddress(rbx));
}

void TemplateTable::astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);
  __ pop_i();
  locals_index_wide(rbx);
  __ movl(iaddress(rbx), rax);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);
  NOT_LP64(__ pop_l(rax, rdx));
  LP64_ONLY(__ pop_l());
  locals_index_wide(rbx);
  __ movptr(laddress(rbx), rax);
  NOT_LP64(__ movl(haddress(rbx), rdx));
}

void TemplateTable::wide_fstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_f(xmm0);
  locals_index_wide(rbx);
  __ movflt(faddress(rbx), xmm0);
#else
  wide_istore();
#endif
}

void TemplateTable::wide_dstore() {
#ifdef _LP64
  transition(vtos, vtos);
  __ pop_d(xmm0);
  locals_index_wide(rbx);
  __ movdbl(daddress(rbx), xmm0);
#else
  wide_lstore();
#endif
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  locals_index_wide(rbx);
  __ movptr(aaddress(rbx), rax);
}

void TemplateTable::iastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_INT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_INT)),
                     rax, noreg, noreg);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);
  __ pop_i(rbx);
  // rax: low(value)
  // rcx: array
  // rdx: high(value)
  index_check(rcx, rbx);  // prefer index in rbx
  // rbx: index
  __ access_store_at(T_LONG, IN_HEAP | IS_ARRAY,
                     Address(rcx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_LONG)),
                     noreg /* ltos */, noreg, noreg);
}


void TemplateTable::fastore() {
  transition(ftos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 1 ? xmm0 : ST(0)
  // rbx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_FLOAT, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_4,
                             arrayOopDesc::base_offset_in_bytes(T_FLOAT)),
                     noreg /* ftos */, noreg, noreg);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);
  __ pop_i(rbx);
  // value is in UseSSE >= 2 ? xmm0 : ST(0)
  // rbx:  index
  // rdx:  array
  index_check(rdx, rbx); // prefer index in rbx
  __ access_store_at(T_DOUBLE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_8,
                             arrayOopDesc::base_offset_in_bytes(T_DOUBLE)),
                     noreg /* dtos */, noreg, noreg);
}

void TemplateTable::aastore() {
  Label is_null, is_flat_array, ok_is_subtype, done;
  transition(vtos, vtos);
  // stack: ..., array, index, value
  __ movptr(rax, at_tos());    // value
  __ movl(rcx, at_tos_p1()); // index
  __ movptr(rdx, at_tos_p2()); // array

  Address element_address(rdx, rcx,
                          UseCompressedOops? Address::times_4 : Address::times_ptr,
                          arrayOopDesc::base_offset_in_bytes(T_OBJECT));

  index_check_without_pop(rdx, rcx);     // kills rbx

  __ testptr(rax, rax);
  __ jcc(Assembler::zero, is_null);

  // Move array class to rdi
  __ load_klass(rdi, rdx);
  if (ValueArrayFlatten) {
    __ test_flattened_array_oop(rdx, rbx, is_flat_array);
  }

  // Move subklass into rbx
  __ load_klass(rbx, rax);
  // Move array element superklass into rax
  __ movptr(rax, Address(rdi,
                         ObjArrayKlass::element_klass_offset()));

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rbx.
  // is "rbx <: rax" ? (value subclass <: array element superclass)
  __ gen_subtype_check(rbx, ok_is_subtype);

  // Come here on failure
  // object is at TOS
  __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

  // Come here on success
  __ bind(ok_is_subtype);

  // Get the value we will store
  __ movptr(rax, at_tos());
  __ movl(rcx, at_tos_p1()); // index
  // Now store using the appropriate barrier
  do_oop_store(_masm, element_address, rax, IS_ARRAY);
  __ jmp(done);

  // Have a NULL in rax, rdx=array, rcx=index.  Store NULL at ary[idx]
  __ bind(is_null);
  __ profile_null_seen(rbx);
  if (EnableValhalla) {
    Label is_null_into_value_array_npe, store_null;

    // No way to store null in null-free array
    __ test_null_free_array_oop(rdx, rbx, is_null_into_value_array_npe);
    __ jmp(store_null);

    __ bind(is_null_into_value_array_npe);
    __ jump(ExternalAddress(Interpreter::_throw_NullPointerException_entry));

    __ bind(store_null);
  }
  // Store a NULL
  do_oop_store(_masm, element_address, noreg, IS_ARRAY);
  __ jmp(done);

  if (EnableValhalla) {
    Label is_type_ok;
    __ bind(is_flat_array); // Store non-null value to flat

    // Simplistic type check...

    // Profile the not-null value's klass.
    __ load_klass(rbx, rax);
    __ profile_typecheck(rcx, rbx, rax); // blows rcx and rax
    // Move element klass into rax
    __ movptr(rax, Address(rdi, ArrayKlass::element_klass_offset()));
    // flat value array needs exact type match
    // is "rax == rbx" (value subclass == array element superclass)
    __ cmpptr(rax, rbx);
    __ jccb(Assembler::equal, is_type_ok);

    __ profile_typecheck_failed(rcx);
    __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));

    __ bind(is_type_ok);
    __ movptr(rax, at_tos());  // value
    __ movl(rcx, at_tos_p1()); // index
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_store), rax, rdx, rcx);
  }
  // Pop stack arguments
  __ bind(done);
  __ addptr(rsp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx); // prefer index in rbx
  // Need to check whether array is boolean or byte
  // since both types share the bastore bytecode.
  __ load_klass(rcx, rdx);
  __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
  int diffbit = Klass::layout_helper_boolean_diffbit();
  __ testl(rcx, diffbit);
  Label L_skip;
  __ jccb(Assembler::zero, L_skip);
  __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
  __ bind(L_skip);
  __ access_store_at(T_BYTE, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx,Address::times_1,
                             arrayOopDesc::base_offset_in_bytes(T_BYTE)),
                     rax, noreg, noreg);
}

void TemplateTable::castore() {
  transition(itos, vtos);
  __ pop_i(rbx);
  // rax: value
  // rbx: index
  // rdx: array
  index_check(rdx, rbx);  // prefer index in rbx
  __ access_store_at(T_CHAR, IN_HEAP | IS_ARRAY,
                     Address(rdx, rbx, Address::times_2,
                             arrayOopDesc::base_offset_in_bytes(T_CHAR)),
                     rax, noreg, noreg);
}


void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ movl(iaddress(n), rax);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ movptr(laddress(n), rax);
  NOT_LP64(__ movptr(haddress(n), rdx));
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ store_float(faddress(n));
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ store_double(daddress(n));
}


void TemplateTable::astore(int n) {
  transition(vtos, vtos);
  __ pop_ptr(rax);
  __ movptr(aaddress(n), rax);
}

void TemplateTable::pop() {
  transition(vtos, vtos);
  __ addptr(rsp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);
  __ addptr(rsp, 2 * Interpreter::stackElementSize);
}


void TemplateTable::dup() {
  transition(vtos, vtos);
  __ load_ptr(0, rax);
  __ push_ptr(rax);
  // stack: ..., a, a
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 0, rax);  // load b
  __ load_ptr( 1, rcx);  // load a
  __ store_ptr(1, rax);  // store b
  __ store_ptr(0, rcx);  // store a
  __ push_ptr(rax);      // push b
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rax);  // load c
  __ load_ptr( 2, rcx);  // load a
  __ store_ptr(2, rax);  // store c in a
  __ push_ptr(rax);      // push c
  // stack: ..., c, b, c, c
  __ load_ptr( 2, rax);  // load b
  __ store_ptr(2, rcx);  // store a in b
  // stack: ..., c, a, c, c
  __ store_ptr(1, rax);  // store b in c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr(1, rax);  // load a
  __ push_ptr(rax);     // push a
  __ load_ptr(1, rax);  // load b
  __ push_ptr(rax);     // push b
  // stack: ..., a, b, a, b
}


void TemplateTable::dup2_x1() {
  transition(vtos, vtos);
  // stack: ..., a, b, c
  __ load_ptr( 0, rcx);  // load c
  __ load_ptr( 1, rax);  // load b
  __ push_ptr(rax);      // push b
  __ push_ptr(rcx);      // push c
  // stack: ..., a, b, c, b, c
  __ store_ptr(3, rcx);  // store c in b
  // stack: ..., a, c, c, b, c
  __ load_ptr( 4, rcx);  // load a
  __ store_ptr(2, rcx);  // store a in 2nd c
  // stack: ..., a, c, a, b, c
  __ store_ptr(4, rax);  // store b in a
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);
  // stack: ..., a, b, c, d
  __ load_ptr( 0, rcx);  // load d
  __ load_ptr( 1, rax);  // load c
  __ push_ptr(rax);      // push c
  __ push_ptr(rcx);      // push d
  // stack: ..., a, b, c, d, c, d
  __ load_ptr( 4, rax);  // load b
  __ store_ptr(2, rax);  // store b in d
  __ store_ptr(4, rcx);  // store d in b
  // stack: ..., a, d, c, b, c, d
  __ load_ptr( 5, rcx);  // load a
  __ load_ptr( 3, rax);  // load c
  __ store_ptr(3, rcx);  // store a in c
  __ store_ptr(5, rax);  // store c in a
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b
  __ load_ptr( 1, rcx);  // load a
  __ load_ptr( 0, rax);  // load b
  __ store_ptr(0, rcx);  // store a in b
  __ store_ptr(1, rax);  // store b in a
  // stack: ..., b, a
}

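// Binary int ops: TOS (the second operand) arrives in rax. Commutative ops
// simply pop the first operand into rdx and combine; sub and the shifts must
// first move TOS aside because operand order matters, and the variable shift
// instructions additionally require the count in cl.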
void TemplateTable::iop2(Operation op) {
  transition(itos, itos);
  switch (op) {
  case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
  case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
  case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
  case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
  case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
  case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
  case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
  case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
  case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
  default   : ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);
#ifdef _LP64
  switch (op) {
  case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
  case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
  case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
  case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
  case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
  default   : ShouldNotReachHere();
  }
#else
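  // 32-bit: a long lives in the rdx:rax pair (high:low). add/adc and sub/sbb
  // propagate the carry or borrow from the low half into the high half.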
  __ pop_l(rbx, rcx);
  switch (op) {
    case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
    case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
                __ mov (rax, rbx); __ mov (rdx, rcx); break;
    case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
    case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
    case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
    default   : ShouldNotReachHere();
  }
#endif
}

void TemplateTable::idiv() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
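  // corrected_idivl special-cases min_int / -1, which would otherwise trap
  // (#DE): the JLS defines that quotient as min_int with remainder 0.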
  __ corrected_idivl(rcx);
}

void TemplateTable::irem() {
  transition(itos, itos);
  __ movl(rcx, rax);
  __ pop_i(rax);
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivl(rcx);
  __ movl(rax, rdx);
}

void TemplateTable::lmul() {
  transition(ltos, ltos);
#ifdef _LP64
  __ pop_l(rdx);
  __ imulq(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  __ lmul(2 * wordSize, 0);
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
#endif
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  // generate explicit div0 check
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
  __ addptr(rsp, 4 * wordSize);  // take off temporaries
#endif
}

void TemplateTable::lrem() {
  transition(ltos, ltos);
#ifdef _LP64
  __ mov(rcx, rax);
  __ pop_l(rax);
  __ testq(rcx, rcx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
  //       they are not equal, one could do a normal division (no correction
  //       needed), which may speed up this implementation for the common case.
  //       (see also JVM spec., p.243 & p.271)
  __ corrected_idivq(rcx); // kills rbx
  __ mov(rax, rdx);
#else
  __ pop_l(rbx, rcx);
  __ push(rcx); __ push(rbx);
  __ push(rdx); __ push(rax);
  // check if y = 0
  __ orl(rax, rdx);
  __ jump_cc(Assembler::zero,
             ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
  __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
  __ addptr(rsp, 4 * wordSize);
#endif
}

void TemplateTable::lshl() {
  transition(itos, ltos);
  __ movl(rcx, rax);                             // get shift count
#ifdef _LP64
  __ pop_l(rax);                                 // get shift value
  __ shlq(rax);
#else
  __ pop_l(rax, rdx);                            // get shift value
  __ lshl(rdx, rax);
#endif
}
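// Note: the hardware masks the variable shift count (mod 64 for 64-bit
// shifts, mod 32 for 32-bit ones), which matches the masking of shift
// distances that the JLS requires.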

void TemplateTable::lshr() {
#ifdef _LP64
  transition(itos, ltos);
  __ movl(rcx, rax);                             // get shift count
  __ pop_l(rax);                                 // get shift value
  __ sarq(rax);
#else
  transition(itos, ltos);
  __ mov(rcx, rax);                              // get shift count
  __ pop_l(rax, rdx);                            // get shift value
  __ lshr(rdx, rax, true);
#endif
}

void TemplateTable::lushr() {
  transition(itos, ltos);
#ifdef _LP64
  __ movl(rcx, rax);                             // get shift count
  __ pop_l(rax);                                 // get shift value
  __ shrq(rax);
#else
  __ mov(rcx, rax);                              // get shift count
  __ pop_l(rax, rdx);                            // get shift value
  __ lshr(rdx, rax);
#endif
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  if (UseSSE >= 1) {
    switch (op) {
    case add:
      __ addss(xmm0, at_rsp());
      __ addptr(rsp, Interpreter::stackElementSize);
      break;
    case sub:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ subss(xmm0, xmm1);
      break;
    case mul:
      __ mulss(xmm0, at_rsp());
      __ addptr(rsp, Interpreter::stackElementSize);
      break;
    case div:
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ divss(xmm0, xmm1);
      break;
    case rem:
      // On x86_64 platforms the SharedRuntime::frem method is called to perform the
      // modulo operation. The frem method calls the function
      // double fmod(double x, double y) in math.h. The documentation of fmod states:
      // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
      // (signalling or quiet) is returned.
      //
      // On x86_32 platforms the FPU is used to perform the modulo operation. The
      // reason is that on 32-bit Windows the sign of modulo operations diverges from
      // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f, not -0.0f).
      // The fprem instruction used on x86_32 is functionally equivalent to
      // SharedRuntime::frem in that it returns a NaN.
#ifdef _LP64
      __ movflt(xmm1, xmm0);
      __ pop_f(xmm0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
#else
      __ push_f(xmm0);
      __ pop_f();
      __ fld_s(at_rsp());
      __ fremr(rax);
      __ f2ieee();
      __ pop(rax);  // pop second operand off the stack
      __ push_f();
      __ pop_f(xmm0);
#endif
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    switch (op) {
    case add: __ fadd_s (at_rsp());                break;
    case sub: __ fsubr_s(at_rsp());                break;
    case mul: __ fmul_s (at_rsp());                break;
    case div: __ fdivr_s(at_rsp());                break;
    case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
    }
    __ f2ieee();
    __ pop(rax);  // pop second operand off the stack
#endif // _LP64
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);
  if (UseSSE >= 2) {
    switch (op) {
    case add:
      __ addsd(xmm0, at_rsp());
      __ addptr(rsp, 2 * Interpreter::stackElementSize);
      break;
    case sub:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ subsd(xmm0, xmm1);
      break;
    case mul:
      __ mulsd(xmm0, at_rsp());
      __ addptr(rsp, 2 * Interpreter::stackElementSize);
      break;
    case div:
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ divsd(xmm0, xmm1);
      break;
    case rem:
      // Similar to fop2(), the modulo operation is performed using the
      // SharedRuntime::drem method (on x86_64 platforms) or using the
      // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
#ifdef _LP64
      __ movdbl(xmm1, xmm0);
      __ pop_d(xmm0);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
#else
      __ push_d(xmm0);
      __ pop_d();
      __ fld_d(at_rsp());
      __ fremr(rax);
      __ d2ieee();
      __ pop(rax);
      __ pop(rdx);
      __ push_d();
      __ pop_d(xmm0);
#endif
      break;
    default:
      ShouldNotReachHere();
      break;
    }
  } else {
#ifdef _LP64
    ShouldNotReachHere();
#else
    switch (op) {
    case add: __ fadd_d (at_rsp());                break;
    case sub: __ fsubr_d(at_rsp());                break;
    case mul: {
      Label L_strict;
      Label L_join;
      const Address access_flags      (rcx, Method::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fmul_d (at_rsp());
      __ jmpb(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmulp();
      __ fmul_d (at_rsp());
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case div: {
      Label L_strict;
      Label L_join;
      const Address access_flags      (rcx, Method::access_flags_offset());
      __ get_method(rcx);
      __ movl(rcx, access_flags);
      __ testl(rcx, JVM_ACC_STRICT);
      __ jccb(Assembler::notZero, L_strict);
      __ fdivr_d(at_rsp());
      __ jmp(L_join);
      __ bind(L_strict);
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
      __ fmul_d (at_rsp());
      __ fdivrp();
      __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
      __ fmulp();
      __ bind(L_join);
      break;
    }
    case rem: __ fld_d  (at_rsp()); __ fremr(rax); break;
    default : ShouldNotReachHere();
    }
    __ d2ieee();
    // Pop double precision number from rsp.
    __ pop(rax);
    __ pop(rdx);
#endif
  }
}

void TemplateTable::ineg() {
  transition(itos, itos);
  __ negl(rax);
}

void TemplateTable::lneg() {
  transition(ltos, ltos);
  LP64_ONLY(__ negq(rax));
  NOT_LP64(__ lneg(rdx, rax));
}

1756 // Note: 'double' and 'long long' have 32-bit alignment on x86.
1757 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1758   // Use the expression (adr)&(~0xF) to provide a 128-bit aligned address
1759   // for the 128-bit operands of SSE instructions.
1760   jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1761   // Store the value to a 128-bit operand.
1762   operand[0] = lo;
1763   operand[1] = hi;
1764   return operand;
1765 }
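
// For illustration only (a sketch, not generated code): the pools below have
// four jlong slots (32 bytes), so rounding &pool[1] down to a 16-byte boundary
// always stays inside the buffer, whatever the pool's own 8-byte alignment:
//
//   jlong* op = double_quadword(&float_signflip_pool[1], lo, hi);
//   assert(((intptr_t)op & 0xF) == 0, "must be 128-bit aligned");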
1766 
1767 // Buffer for 128-bit masks used by SSE instructions.
1768 static jlong float_signflip_pool[2*2];
1769 static jlong double_signflip_pool[2*2];
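
// fneg()/dneg() below negate by XOR-ing the IEEE-754 sign bit, which also
// preserves NaN payloads. Scalar sketch (illustration only):
//
//   uint32_t bits;
//   memcpy(&bits, &f, sizeof bits);   // f is a float
//   bits ^= 0x80000000u;              // flip only the sign bit: now -f
//   memcpy(&f, &bits, sizeof bits);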
1770 
1771 void TemplateTable::fneg() {
1772   transition(ftos, ftos);
1773   if (UseSSE >= 1) {
1774     static jlong *float_signflip  = double_quadword(&float_signflip_pool[1],  CONST64(0x8000000080000000),  CONST64(0x8000000080000000));
1775     __ xorps(xmm0, ExternalAddress((address) float_signflip));
1776   } else {
1777     LP64_ONLY(ShouldNotReachHere());
1778     NOT_LP64(__ fchs());
1779   }
1780 }
1781 
1782 void TemplateTable::dneg() {
1783   transition(dtos, dtos);
1784   if (UseSSE >= 2) {
1785     static jlong *double_signflip =
1786       double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
1787     __ xorpd(xmm0, ExternalAddress((address) double_signflip));
1788   } else {
1789 #ifdef _LP64
1790     ShouldNotReachHere();
1791 #else
1792     __ fchs();
1793 #endif
1794   }
1795 }
1796 
1797 void TemplateTable::iinc() {
1798   transition(vtos, vtos);
1799   __ load_signed_byte(rdx, at_bcp(2)); // get constant
1800   locals_index(rbx);
1801   __ addl(iaddress(rbx), rdx);
1802 }
1803 
1804 void TemplateTable::wide_iinc() {
1805   transition(vtos, vtos);
1806   __ movl(rdx, at_bcp(4)); // get constant
1807   locals_index_wide(rbx);
1808   __ bswapl(rdx); // swap bytes & sign-extend constant
1809   __ sarl(rdx, 16);
1810   __ addl(iaddress(rbx), rdx);
1811   // Note: should probably use only one movl to get both
1812   //       the index and the constant -> fix this
1813 }
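
// A C sketch of the bswap/sar idiom in wide_iinc (illustration only, using
// GCC/Clang builtins; bcp is the interpreter's bytecode pointer): the 16-bit
// big-endian increment at bcp + 4 is loaded together with the two bytes that
// follow it, byte-swapped into the high half, then shifted arithmetically to
// sign-extend it:
//
//   int32_t raw;
//   memcpy(&raw, bcp + 4, 4);        // little-endian 32-bit load
//   raw = __builtin_bswap32(raw);    // constant now occupies bits 31..16
//   raw >>= 16;                      // arithmetic shift sign-extends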
1814 
1815 void TemplateTable::convert() {
1816 #ifdef _LP64
1817   // Checking
1818 #ifdef ASSERT
1819   {
1820     TosState tos_in  = ilgl;
1821     TosState tos_out = ilgl;
1822     switch (bytecode()) {
1823     case Bytecodes::_i2l: // fall through
1824     case Bytecodes::_i2f: // fall through
1825     case Bytecodes::_i2d: // fall through
1826     case Bytecodes::_i2b: // fall through
1827     case Bytecodes::_i2c: // fall through
1828     case Bytecodes::_i2s: tos_in = itos; break;
1829     case Bytecodes::_l2i: // fall through
1830     case Bytecodes::_l2f: // fall through
1831     case Bytecodes::_l2d: tos_in = ltos; break;
1832     case Bytecodes::_f2i: // fall through
1833     case Bytecodes::_f2l: // fall through
1834     case Bytecodes::_f2d: tos_in = ftos; break;
1835     case Bytecodes::_d2i: // fall through
1836     case Bytecodes::_d2l: // fall through
1837     case Bytecodes::_d2f: tos_in = dtos; break;
1838     default             : ShouldNotReachHere();
1839     }
1840     switch (bytecode()) {
1841     case Bytecodes::_l2i: // fall through
1842     case Bytecodes::_f2i: // fall through
1843     case Bytecodes::_d2i: // fall through
1844     case Bytecodes::_i2b: // fall through
1845     case Bytecodes::_i2c: // fall through
1846     case Bytecodes::_i2s: tos_out = itos; break;
1847     case Bytecodes::_i2l: // fall through
1848     case Bytecodes::_f2l: // fall through
1849     case Bytecodes::_d2l: tos_out = ltos; break;
1850     case Bytecodes::_i2f: // fall through
1851     case Bytecodes::_l2f: // fall through
1852     case Bytecodes::_d2f: tos_out = ftos; break;
1853     case Bytecodes::_i2d: // fall through
1854     case Bytecodes::_l2d: // fall through
1855     case Bytecodes::_f2d: tos_out = dtos; break;
1856     default             : ShouldNotReachHere();
1857     }
1858     transition(tos_in, tos_out);
1859   }
1860 #endif // ASSERT
1861 
1862   static const int64_t is_nan = 0x8000000000000000L;
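  // This is the "integer indefinite" value that cvttss2siq/cvttsd2siq produce
  // for NaN and out-of-range inputs; matching it routes us to the slow path.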
1863 
1864   // Conversion
1865   switch (bytecode()) {
1866   case Bytecodes::_i2l:
1867     __ movslq(rax, rax);
1868     break;
1869   case Bytecodes::_i2f:
1870     __ cvtsi2ssl(xmm0, rax);
1871     break;
1872   case Bytecodes::_i2d:
1873     __ cvtsi2sdl(xmm0, rax);
1874     break;
1875   case Bytecodes::_i2b:
1876     __ movsbl(rax, rax);
1877     break;
1878   case Bytecodes::_i2c:
1879     __ movzwl(rax, rax);
1880     break;
1881   case Bytecodes::_i2s:
1882     __ movswl(rax, rax);
1883     break;
1884   case Bytecodes::_l2i:
1885     __ movl(rax, rax);
1886     break;
1887   case Bytecodes::_l2f:
1888     __ cvtsi2ssq(xmm0, rax);
1889     break;
1890   case Bytecodes::_l2d:
1891     __ cvtsi2sdq(xmm0, rax);
1892     break;
1893   case Bytecodes::_f2i:
1894   {
1895     Label L;
1896     __ cvttss2sil(rax, xmm0);
1897     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1898     __ jcc(Assembler::notEqual, L);
1899     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1900     __ bind(L);
1901   }
1902     break;
1903   case Bytecodes::_f2l:
1904   {
1905     Label L;
1906     __ cvttss2siq(rax, xmm0);
1907     // NaN or overflow/underflow?
1908     __ cmp64(rax, ExternalAddress((address) &is_nan));
1909     __ jcc(Assembler::notEqual, L);
1910     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1911     __ bind(L);
1912   }
1913     break;
1914   case Bytecodes::_f2d:
1915     __ cvtss2sd(xmm0, xmm0);
1916     break;
1917   case Bytecodes::_d2i:
1918   {
1919     Label L;
1920     __ cvttsd2sil(rax, xmm0);
1921     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1922     __ jcc(Assembler::notEqual, L);
1923     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1924     __ bind(L);
1925   }
1926     break;
1927   case Bytecodes::_d2l:
1928   {
1929     Label L;
1930     __ cvttsd2siq(rax, xmm0);
1931     // NaN or overflow/underflow?
1932     __ cmp64(rax, ExternalAddress((address) &is_nan));
1933     __ jcc(Assembler::notEqual, L);
1934     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1935     __ bind(L);
1936   }
1937     break;
1938   case Bytecodes::_d2f:
1939     __ cvtsd2ss(xmm0, xmm0);
1940     break;
1941   default:
1942     ShouldNotReachHere();
1943   }
1944 #else
1945   // Checking
1946 #ifdef ASSERT
1947   { TosState tos_in  = ilgl;
1948     TosState tos_out = ilgl;
1949     switch (bytecode()) {
1950       case Bytecodes::_i2l: // fall through
1951       case Bytecodes::_i2f: // fall through
1952       case Bytecodes::_i2d: // fall through
1953       case Bytecodes::_i2b: // fall through
1954       case Bytecodes::_i2c: // fall through
1955       case Bytecodes::_i2s: tos_in = itos; break;
1956       case Bytecodes::_l2i: // fall through
1957       case Bytecodes::_l2f: // fall through
1958       case Bytecodes::_l2d: tos_in = ltos; break;
1959       case Bytecodes::_f2i: // fall through
1960       case Bytecodes::_f2l: // fall through
1961       case Bytecodes::_f2d: tos_in = ftos; break;
1962       case Bytecodes::_d2i: // fall through
1963       case Bytecodes::_d2l: // fall through
1964       case Bytecodes::_d2f: tos_in = dtos; break;
1965       default             : ShouldNotReachHere();
1966     }
1967     switch (bytecode()) {
1968       case Bytecodes::_l2i: // fall through
1969       case Bytecodes::_f2i: // fall through
1970       case Bytecodes::_d2i: // fall through
1971       case Bytecodes::_i2b: // fall through
1972       case Bytecodes::_i2c: // fall through
1973       case Bytecodes::_i2s: tos_out = itos; break;
1974       case Bytecodes::_i2l: // fall through
1975       case Bytecodes::_f2l: // fall through
1976       case Bytecodes::_d2l: tos_out = ltos; break;
1977       case Bytecodes::_i2f: // fall through
1978       case Bytecodes::_l2f: // fall through
1979       case Bytecodes::_d2f: tos_out = ftos; break;
1980       case Bytecodes::_i2d: // fall through
1981       case Bytecodes::_l2d: // fall through
1982       case Bytecodes::_f2d: tos_out = dtos; break;
1983       default             : ShouldNotReachHere();
1984     }
1985     transition(tos_in, tos_out);
1986   }
1987 #endif // ASSERT
1988 
1989   // Conversion
1990   // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1991   switch (bytecode()) {
1992     case Bytecodes::_i2l:
1993       __ extend_sign(rdx, rax);
1994       break;
1995     case Bytecodes::_i2f:
1996       if (UseSSE >= 1) {
1997         __ cvtsi2ssl(xmm0, rax);
1998       } else {
1999         __ push(rax);          // store int on tos
2000         __ fild_s(at_rsp());   // load int to ST0
2001         __ f2ieee();           // truncate to float size
2002         __ pop(rcx);           // adjust rsp
2003       }
2004       break;
2005     case Bytecodes::_i2d:
2006       if (UseSSE >= 2) {
2007         __ cvtsi2sdl(xmm0, rax);
2008       } else {
2009         __ push(rax);          // add one slot for d2ieee()
2010         __ push(rax);          // store int on tos
2011         __ fild_s(at_rsp());   // load int to ST0
2012         __ d2ieee();           // truncate to double size
2013         __ pop(rcx);           // adjust rsp
2014         __ pop(rcx);
2015       }
2016       break;
2017     case Bytecodes::_i2b:
2018       __ shll(rax, 24);      // truncate upper 24 bits
2019       __ sarl(rax, 24);      // and sign-extend byte
2020       LP64_ONLY(__ movsbl(rax, rax));
2021       break;
2022     case Bytecodes::_i2c:
2023       __ andl(rax, 0xFFFF);  // truncate upper 16 bits
2024       LP64_ONLY(__ movzwl(rax, rax));
2025       break;
2026     case Bytecodes::_i2s:
2027       __ shll(rax, 16);      // truncate upper 16 bits
2028       __ sarl(rax, 16);      // and sign-extend short
2029       LP64_ONLY(__ movswl(rax, rax));
2030       break;
2031     case Bytecodes::_l2i:
2032       /* nothing to do */
2033       break;
2034     case Bytecodes::_l2f:
2035       // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
2036       // 64-bit long values to floats. On 32-bit platforms it is not possible
2037       // to use that instruction with 64-bit operands, therefore the FPU is
2038       // used to perform the conversion.
2039       __ push(rdx);          // store long on tos
2040       __ push(rax);
2041       __ fild_d(at_rsp());   // load long to ST0
2042       __ f2ieee();           // truncate to float size
2043       __ pop(rcx);           // adjust rsp
2044       __ pop(rcx);
2045       if (UseSSE >= 1) {
2046         __ push_f();
2047         __ pop_f(xmm0);
2048       }
2049       break;
2050     case Bytecodes::_l2d:
2051       // On 32-bit platforms the FPU is used for conversion because on
2052       // 32-bit platforms it is not possible to use the cvtsi2sdq
2053       // instruction with 64-bit operands.
2054       __ push(rdx);          // store long on tos
2055       __ push(rax);
2056       __ fild_d(at_rsp());   // load long to ST0
2057       __ d2ieee();           // truncate to double size
2058       __ pop(rcx);           // adjust rsp
2059       __ pop(rcx);
2060       if (UseSSE >= 2) {
2061         __ push_d();
2062         __ pop_d(xmm0);
2063       }
2064       break;
2065     case Bytecodes::_f2i:
2066       // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
2067       // as it returns 0 for any NaN.
2068       if (UseSSE >= 1) {
2069         __ push_f(xmm0);
2070       } else {
2071         __ push(rcx);          // reserve space for argument
2072         __ fstp_s(at_rsp());   // pass float argument on stack
2073       }
2074       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
2075       break;
2076     case Bytecodes::_f2l:
2077       // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
2078       // as it returns 0 for any NaN.
2079       if (UseSSE >= 1) {
2080         __ push_f(xmm0);
2081       } else {
2082         __ push(rcx);          // reserve space for argument
2083         __ fstp_s(at_rsp());   // pass float argument on stack
2084       }
2085       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2086       break;
2087     case Bytecodes::_f2d:
2088       if (UseSSE < 1) {
2089         /* nothing to do */
2090       } else if (UseSSE == 1) {
2091         __ push_f(xmm0);
2092         __ pop_f();
2093       } else { // UseSSE >= 2
2094         __ cvtss2sd(xmm0, xmm0);
2095       }
2096       break;
2097     case Bytecodes::_d2i:
2098       if (UseSSE >= 2) {
2099         __ push_d(xmm0);
2100       } else {
2101         __ push(rcx);          // reserve space for argument
2102         __ push(rcx);
2103         __ fstp_d(at_rsp());   // pass double argument on stack
2104       }
2105       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
2106       break;
2107     case Bytecodes::_d2l:
2108       if (UseSSE >= 2) {
2109         __ push_d(xmm0);
2110       } else {
2111         __ push(rcx);          // reserve space for argument
2112         __ push(rcx);
2113         __ fstp_d(at_rsp());   // pass double argument on stack
2114       }
2115       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
2116       break;
2117     case Bytecodes::_d2f:
2118       if (UseSSE <= 1) {
2119         __ push(rcx);          // reserve space for f2ieee()
2120         __ f2ieee();           // truncate to float size
2121         __ pop(rcx);           // adjust rsp
2122         if (UseSSE == 1) {
2123           // The cvtsd2ss instruction is not available if UseSSE==1, therefore
2124           // the conversion is performed using the FPU in this case.
2125           __ push_f();
2126           __ pop_f(xmm0);
2127         }
2128       } else { // UseSSE >= 2
2129         __ cvtsd2ss(xmm0, xmm0);
2130       }
2131       break;
2132     default             :
2133       ShouldNotReachHere();
2134   }
2135 #endif
2136 }
2137 
2138 void TemplateTable::lcmp() {
2139   transition(ltos, itos);
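  // Sketch of the result convention implemented below: with x popped from the
  // expression stack and y in the tos register(s),
  //   result = (x < y) ? -1 : ((x == y) ? 0 : 1)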
2140 #ifdef _LP64
2141   Label done;
2142   __ pop_l(rdx);
2143   __ cmpq(rdx, rax);
2144   __ movl(rax, -1);
2145   __ jccb(Assembler::less, done);
2146   __ setb(Assembler::notEqual, rax);
2147   __ movzbl(rax, rax);
2148   __ bind(done);
2149 #else
2150 
2151   // y = rdx:rax
2152   __ pop_l(rbx, rcx);             // get x = rcx:rbx
2153   __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
2154   __ mov(rax, rcx);
2155 #endif
2156 }
2157 
2158 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
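  // fcmpl/dcmpl reach here with unordered_result == -1, fcmpg/dcmpg with +1.
  // Sketch of the value computed (x popped, y on tos):
  //   x < y -> -1,  x == y -> 0,  x > y -> 1,
  //   unordered (a NaN operand) -> unordered_result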
2159   if ((is_float && UseSSE >= 1) ||
2160       (!is_float && UseSSE >= 2)) {
2161     Label done;
2162     if (is_float) {
2163       // XXX get rid of pop here, use ... reg, mem32
2164       __ pop_f(xmm1);
2165       __ ucomiss(xmm1, xmm0);
2166     } else {
2167       // XXX get rid of pop here, use ... reg, mem64
2168       __ pop_d(xmm1);
2169       __ ucomisd(xmm1, xmm0);
2170     }
2171     if (unordered_result < 0) {
2172       __ movl(rax, -1);
2173       __ jccb(Assembler::parity, done);
2174       __ jccb(Assembler::below, done);
2175       __ setb(Assembler::notEqual, rdx);
2176       __ movzbl(rax, rdx);
2177     } else {
2178       __ movl(rax, 1);
2179       __ jccb(Assembler::parity, done);
2180       __ jccb(Assembler::above, done);
2181       __ movl(rax, 0);
2182       __ jccb(Assembler::equal, done);
2183       __ decrementl(rax);
2184     }
2185     __ bind(done);
2186   } else {
2187 #ifdef _LP64
2188     ShouldNotReachHere();
2189 #else
2190     if (is_float) {
2191       __ fld_s(at_rsp());
2192     } else {
2193       __ fld_d(at_rsp());
2194       __ pop(rdx);
2195     }
2196     __ pop(rcx);
2197     __ fcmp2int(rax, unordered_result < 0);
2198 #endif // _LP64
2199   }
2200 }
2201 
2202 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2203   __ get_method(rcx); // rcx holds method
2204   __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2205                                      // holds bumped taken count
2206 
2207   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2208                              InvocationCounter::counter_offset();
2209   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2210                               InvocationCounter::counter_offset();
2211 
2212   // Load up edx with the branch displacement
2213   if (is_wide) {
2214     __ movl(rdx, at_bcp(1));
2215   } else {
2216     __ load_signed_short(rdx, at_bcp(1));
2217   }
2218   __ bswapl(rdx);
2219 
2220   if (!is_wide) {
2221     __ sarl(rdx, 16);
2222   }
2223   LP64_ONLY(__ movl2ptr(rdx, rdx));
2224 
2225   // Handle all the JSR stuff here, then exit.
2226   // It's much shorter and cleaner than intermingling with the non-JSR
2227   // normal-branch stuff occurring below.
2228   if (is_jsr) {
2229     // Pre-load the next target bytecode into rbx
2230     __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2231 
2232     // compute return address as bci in rax
2233     __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2234                         in_bytes(ConstMethod::codes_offset())));
2235     __ subptr(rax, Address(rcx, Method::const_offset()));
2236     // Adjust the bcp in r13 by the displacement in rdx
2237     __ addptr(rbcp, rdx);
2238     // jsr returns atos; the value pushed is a return bci, not an oop
2239     __ push_i(rax);
2240     __ dispatch_only(vtos, true);
2241     return;
2242   }
2243 
2244   // Normal (non-jsr) branch handling
2245 
2246   // Adjust the bcp in r13 by the displacement in rdx
2247   __ addptr(rbcp, rdx);
2248 
2249   assert(UseLoopCounter || !UseOnStackReplacement,
2250          "on-stack-replacement requires loop counters");
2251   Label backedge_counter_overflow;
2252   Label profile_method;
2253   Label dispatch;
2254   if (UseLoopCounter) {
2255     // increment backedge counter for backward branches
2256     // rax: MDO
2257     // rbx: MDO bumped taken-count
2258     // rcx: method
2259     // rdx: target offset
2260     // r13: target bcp
2261     // r14: locals pointer
2262     __ testl(rdx, rdx);             // check if forward or backward branch
2263     __ jcc(Assembler::positive, dispatch); // count only if backward branch
2264 
2265     // check if MethodCounters exists
2266     Label has_counters;
2267     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2268     __ testptr(rax, rax);
2269     __ jcc(Assembler::notZero, has_counters);
2270     __ push(rdx);
2271     __ push(rcx);
2272     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2273                rcx);
2274     __ pop(rcx);
2275     __ pop(rdx);
2276     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2277     __ testptr(rax, rax);
2278     __ jcc(Assembler::zero, dispatch);
2279     __ bind(has_counters);
2280 
2281     if (TieredCompilation) {
2282       Label no_mdo;
2283       int increment = InvocationCounter::count_increment;
2284       if (ProfileInterpreter) {
2285         // Are we profiling?
2286         __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2287         __ testptr(rbx, rbx);
2288         __ jccb(Assembler::zero, no_mdo);
2289         // Increment the MDO backedge counter
2290         const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2291                                            in_bytes(InvocationCounter::counter_offset()));
2292         const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2293         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask, rax, false, Assembler::zero,
2294                                    UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2295         __ jmp(dispatch);
2296       }
2297       __ bind(no_mdo);
2298       // Increment backedge counter in MethodCounters*
2299       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2300       const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2301       __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
2302                                  rax, false, Assembler::zero,
2303                                  UseOnStackReplacement ? &backedge_counter_overflow : NULL);
2304     } else { // not TieredCompilation
2305       // increment counter
2306       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2307       __ movl(rax, Address(rcx, be_offset));        // load backedge counter
2308       __ incrementl(rax, InvocationCounter::count_increment); // increment counter
2309       __ movl(Address(rcx, be_offset), rax);        // store counter
2310 
2311       __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
2312 
2313       __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
2314       __ addl(rax, Address(rcx, be_offset));        // add both counters
2315 
2316       if (ProfileInterpreter) {
2317         // Test to see if we should create a method data oop
2318         __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
2319         __ jcc(Assembler::less, dispatch);
2320 
2321         // if no method data exists, go to profile method
2322         __ test_method_data_pointer(rax, profile_method);
2323 
2324         if (UseOnStackReplacement) {
2325           // check for overflow against rbx which is the MDO taken count
2326           __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2327           __ jcc(Assembler::below, dispatch);
2328 
2329           // When ProfileInterpreter is on, the backedge_count comes
2330           // from the MethodData*, whose value does not get reset on
2331           // the call to frequency_counter_overflow().  To avoid
2332           // excessive calls to the overflow routine while the method is
2333           // being compiled, add a second test to make sure the overflow
2334           // function is called only once every overflow_frequency.
2335           const int overflow_frequency = 1024;
2336           __ andl(rbx, overflow_frequency - 1);
2337           __ jcc(Assembler::zero, backedge_counter_overflow);
2338 
2339         }
2340       } else {
2341         if (UseOnStackReplacement) {
2342           // check for overflow against rax, which is the sum of the
2343           // counters
2344           __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2345           __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
2346 
2347         }
2348       }
2349     }
2350     __ bind(dispatch);
2351   }
2352 
2353   // Pre-load the next target bytecode into rbx
2354   __ load_unsigned_byte(rbx, Address(rbcp, 0));
2355 
2356   // continue with the bytecode @ target
2357   // rax: return bci for jsr's, unused otherwise
2358   // rbx: target bytecode
2359   // r13: target bcp
2360   __ dispatch_only(vtos, true);
2361 
2362   if (UseLoopCounter) {
2363     if (ProfileInterpreter) {
2364       // Out-of-line code to allocate method data oop.
2365       __ bind(profile_method);
2366       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2367       __ set_method_data_pointer_for_bcp();
2368       __ jmp(dispatch);
2369     }
2370 
2371     if (UseOnStackReplacement) {
2372       // invocation counter overflow
2373       __ bind(backedge_counter_overflow);
2374       __ negptr(rdx);
2375       __ addptr(rdx, rbcp); // branch bcp
2376       // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2377       __ call_VM(noreg,
2378                  CAST_FROM_FN_PTR(address,
2379                                   InterpreterRuntime::frequency_counter_overflow),
2380                  rdx);
2381 
2382       // rax: osr nmethod (osr ok) or NULL (osr not possible)
2383       // rdx: scratch
2384       // r14: locals pointer
2385       // r13: bcp
2386       __ testptr(rax, rax);                        // test result
2387       __ jcc(Assembler::zero, dispatch);         // no osr if null
2388       // nmethod may have been invalidated (VM may block upon call_VM return)
2389       __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2390       __ jcc(Assembler::notEqual, dispatch);
2391 
2392       // We have the address of an on stack replacement routine in rax.
2393       // In preparation of invoking it, first we must migrate the locals
2394       // and monitors from off the interpreter frame on the stack.
2395       // Ensure to save the osr nmethod over the migration call,
2396       // it will be preserved in rbx.
2397       __ mov(rbx, rax);
2398 
2399       NOT_LP64(__ get_thread(rcx));
2400 
2401       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2402 
2403       // rax is OSR buffer, move it to expected parameter location
2404       LP64_ONLY(__ mov(j_rarg0, rax));
2405       NOT_LP64(__ mov(rcx, rax));
2406       // We use the j_rarg definitions here so that the registers don't conflict:
2407       // parameter registers change across platforms, and we are in the midst of a
2408       // calling sequence to the OSR nmethod, so we don't want a collision. These are NOT parameters.
2409 
2410       const Register retaddr   = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2411       const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2412 
2413       // pop the interpreter frame
2414       __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2415       __ leave();                                // remove frame anchor
2416       __ pop(retaddr);                           // get return address
2417       __ mov(rsp, sender_sp);                   // set sp to sender sp
2418       // Ensure compiled code always sees stack at proper alignment
2419       __ andptr(rsp, -(StackAlignmentInBytes));
2420 
2421       // unlike x86 we need no specialized return from compiled code
2422       // to the interpreter or the call stub.
2423 
2424       // push the return address
2425       __ push(retaddr);
2426 
2427       // and begin the OSR nmethod
2428       __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
2429     }
2430   }
2431 }
2432 
2433 void TemplateTable::if_0cmp(Condition cc) {
2434   transition(itos, vtos);
2435   // assume branch is more often taken than not (loops use backward branches)
2436   Label not_taken;
2437   __ testl(rax, rax);
2438   __ jcc(j_not(cc), not_taken);
2439   branch(false, false);
2440   __ bind(not_taken);
2441   __ profile_not_taken_branch(rax);
2442 }
2443 
2444 void TemplateTable::if_icmp(Condition cc) {
2445   transition(itos, vtos);
2446   // assume branch is more often taken than not (loops use backward branches)
2447   Label not_taken;
2448   __ pop_i(rdx);
2449   __ cmpl(rdx, rax);
2450   __ jcc(j_not(cc), not_taken);
2451   branch(false, false);
2452   __ bind(not_taken);
2453   __ profile_not_taken_branch(rax);
2454 }
2455 
2456 void TemplateTable::if_nullcmp(Condition cc) {
2457   transition(atos, vtos);
2458   // assume branch is more often taken than not (loops use backward branches)
2459   Label not_taken;
2460   __ testptr(rax, rax);
2461   __ jcc(j_not(cc), not_taken);
2462   branch(false, false);
2463   __ bind(not_taken);
2464   __ profile_not_taken_branch(rax);
2465 }
2466 
2467 void TemplateTable::if_acmp(Condition cc) {
2468   transition(atos, vtos);
2469   // assume branch is more often taken than not (loops use backward branches)
2470   Label taken, not_taken;
2471   __ pop_ptr(rdx);
2472 
2473   const int is_value_mask = markOopDesc::always_locked_pattern;
2474   if (EnableValhalla) {
2475     __ cmpoop(rdx, rax);
2476     __ jcc(Assembler::equal, (cc == equal) ? taken : not_taken);
2477 
2478     // might be substitutable, test if either rax or rdx is null
2479     __ movptr(rbx, rdx);
2480     __ andptr(rbx, rax);
2481     __ testptr(rbx, rbx);
2482     __ jcc(Assembler::zero, (cc == equal) ? not_taken : taken);
2483 
2484     // and both are values ?
2485     __ movptr(rbx, Address(rdx, oopDesc::mark_offset_in_bytes()));
2486     __ andptr(rbx, is_value_mask);
2487     __ movptr(rcx, Address(rax, oopDesc::mark_offset_in_bytes()));
2488     __ andptr(rcx, is_value_mask);
2489     __ andptr(rbx, rcx);
2490     __ cmpl(rbx, is_value_mask);
2491     __ jcc(Assembler::notEqual, (cc == equal) ? not_taken : taken);
2492 
2493     // same value klass ?
2494     __ load_metadata(rbx, rdx);
2495     __ load_metadata(rcx, rax);
2496     __ cmpptr(rbx, rcx);
2497     __ jcc(Assembler::notEqual, (cc == equal) ? not_taken : taken);
2498 
2499     // Know both are the same type, let's test for substitutability...
2500     if (cc == equal) {
2501       invoke_is_substitutable(rax, rdx, taken, not_taken);
2502     } else {
2503       invoke_is_substitutable(rax, rdx, not_taken, taken);
2504     }
2505     __ stop("Not reachable");
2506   }
2507 
2508   __ cmpoop(rdx, rax);
2509   __ jcc(j_not(cc), not_taken);
2510   __ bind(taken);
2511   branch(false, false);
2512   __ bind(not_taken);
2513   __ profile_not_taken_branch(rax);
2514 }
2515 
2516 void TemplateTable::invoke_is_substitutable(Register aobj, Register bobj,
2517                                             Label& is_subst, Label& not_subst) {
2518   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::is_substitutable), aobj, bobj);
2519   // Registers restored; rax holds the answer. Jump to the outcome label.
2520   __ testl(rax, rax);
2521   __ jcc(Assembler::zero, not_subst);
2522   __ jmp(is_subst);
2523 }
2524 
2525 void TemplateTable::ret() {
2526   transition(vtos, vtos);
2527   locals_index(rbx);
2528   LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2529   NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2530   __ profile_ret(rbx, rcx);
2531   __ get_method(rax);
2532   __ movptr(rbcp, Address(rax, Method::const_offset()));
2533   __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2534                       ConstMethod::codes_offset()));
2535   __ dispatch_next(vtos, 0, true);
2536 }
2537 
2538 void TemplateTable::wide_ret() {
2539   transition(vtos, vtos);
2540   locals_index_wide(rbx);
2541   __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2542   __ profile_ret(rbx, rcx);
2543   __ get_method(rax);
2544   __ movptr(rbcp, Address(rax, Method::const_offset()));
2545   __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2546   __ dispatch_next(vtos, 0, true);
2547 }
2548 
2549 void TemplateTable::tableswitch() {
2550   Label default_case, continue_execution;
2551   transition(itos, vtos);
2552 
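  // Layout of the tableswitch operands after the 4-byte alignment padding
  // (all big-endian u4 values):
  //   default offset, low, high, then (high - low + 1) jump offsets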
2553   // align r13/rsi
2554   __ lea(rbx, at_bcp(BytesPerInt));
2555   __ andptr(rbx, -BytesPerInt);
2556   // load lo & hi
2557   __ movl(rcx, Address(rbx, BytesPerInt));
2558   __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2559   __ bswapl(rcx);
2560   __ bswapl(rdx);
2561   // check against lo & hi
2562   __ cmpl(rax, rcx);
2563   __ jcc(Assembler::less, default_case);
2564   __ cmpl(rax, rdx);
2565   __ jcc(Assembler::greater, default_case);
2566   // lookup dispatch offset
2567   __ subl(rax, rcx);
2568   __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2569   __ profile_switch_case(rax, rbx, rcx);
2570   // continue execution
2571   __ bind(continue_execution);
2572   __ bswapl(rdx);
2573   LP64_ONLY(__ movl2ptr(rdx, rdx));
2574   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2575   __ addptr(rbcp, rdx);
2576   __ dispatch_only(vtos, true);
2577   // handle default
2578   __ bind(default_case);
2579   __ profile_switch_default(rax);
2580   __ movl(rdx, Address(rbx, 0));
2581   __ jmp(continue_execution);
2582 }
2583 
2584 void TemplateTable::lookupswitch() {
2585   transition(itos, itos);
2586   __ stop("lookupswitch bytecode should have been rewritten");
2587 }
2588 
2589 void TemplateTable::fast_linearswitch() {
2590   transition(itos, vtos);
2591   Label loop_entry, loop, found, continue_execution;
2592   // bswap rax so we can avoid bswapping the table entries
2593   __ bswapl(rax);
2594   // align r13
2595   __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2596                                     // this instruction (change offsets
2597                                     // below)
2598   __ andptr(rbx, -BytesPerInt);
2599   // set counter
2600   __ movl(rcx, Address(rbx, BytesPerInt));
2601   __ bswapl(rcx);
2602   __ jmpb(loop_entry);
2603   // table search
2604   __ bind(loop);
2605   __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2606   __ jcc(Assembler::equal, found);
2607   __ bind(loop_entry);
2608   __ decrementl(rcx);
2609   __ jcc(Assembler::greaterEqual, loop);
2610   // default case
2611   __ profile_switch_default(rax);
2612   __ movl(rdx, Address(rbx, 0));
2613   __ jmp(continue_execution);
2614   // entry found -> get offset
2615   __ bind(found);
2616   __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2617   __ profile_switch_case(rcx, rax, rbx);
2618   // continue execution
2619   __ bind(continue_execution);
2620   __ bswapl(rdx);
2621   __ movl2ptr(rdx, rdx);
2622   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2623   __ addptr(rbcp, rdx);
2624   __ dispatch_only(vtos, true);
2625 }
2626 
2627 void TemplateTable::fast_binaryswitch() {
2628   transition(itos, vtos);
2629   // Implementation using the following core algorithm:
2630   //
2631   // int binary_search(int key, LookupswitchPair* array, int n) {
2632   //   // Binary search according to "Methodik des Programmierens" by
2633   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2634   //   int i = 0;
2635   //   int j = n;
2636   //   while (i+1 < j) {
2637   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2638   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2639   //     // where a stands for the array and assuming that the (nonexistent)
2640   //     // element a[n] is infinitely big.
2641   //     int h = (i + j) >> 1;
2642   //     // i < h < j
2643   //     if (key < array[h].fast_match()) {
2644   //       j = h;
2645   //     } else {
2646   //       i = h;
2647   //     }
2648   //   }
2649   //   // R: a[i] <= key < a[i+1] or Q
2650   //   // (i.e., if key is within array, i is the correct index)
2651   //   return i;
2652   // }
2653 
2654   // Register allocation
2655   const Register key   = rax; // already set (tosca)
2656   const Register array = rbx;
2657   const Register i     = rcx;
2658   const Register j     = rdx;
2659   const Register h     = rdi;
2660   const Register temp  = rsi;
2661 
2662   // Find array start
2663   NOT_LP64(__ save_bcp());
2664 
2665   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2666                                           // get rid of this
2667                                           // instruction (change
2668                                           // offsets below)
2669   __ andptr(array, -BytesPerInt);
2670 
2671   // Initialize i & j
2672   __ xorl(i, i);                            // i = 0;
2673   __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2674 
2675   // Convert j into native byteordering
2676   __ bswapl(j);
2677 
2678   // And start
2679   Label entry;
2680   __ jmp(entry);
2681 
2682   // binary search loop
2683   {
2684     Label loop;
2685     __ bind(loop);
2686     // int h = (i + j) >> 1;
2687     __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2688     __ sarl(h, 1);                               // h = (i + j) >> 1;
2689     // if (key < array[h].fast_match()) {
2690     //   j = h;
2691     // } else {
2692     //   i = h;
2693     // }
2694     // Convert array[h].match to native byte-ordering before compare
2695     __ movl(temp, Address(array, h, Address::times_8));
2696     __ bswapl(temp);
2697     __ cmpl(key, temp);
2698     // j = h if (key <  array[h].fast_match())
2699     __ cmov32(Assembler::less, j, h);
2700     // i = h if (key >= array[h].fast_match())
2701     __ cmov32(Assembler::greaterEqual, i, h);
2702     // while (i+1 < j)
2703     __ bind(entry);
2704     __ leal(h, Address(i, 1)); // i+1
2705     __ cmpl(h, j);             // i+1 < j
2706     __ jcc(Assembler::less, loop);
2707   }
2708 
2709   // end of binary search, result index is i (must check again!)
2710   Label default_case;
2711   // Convert array[i].match to native byte-ordering before compare
2712   __ movl(temp, Address(array, i, Address::times_8));
2713   __ bswapl(temp);
2714   __ cmpl(key, temp);
2715   __ jcc(Assembler::notEqual, default_case);
2716 
2717   // entry found -> j = offset
2718   __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2719   __ profile_switch_case(i, key, array);
2720   __ bswapl(j);
2721   LP64_ONLY(__ movslq(j, j));
2722 
2723   NOT_LP64(__ restore_bcp());
2724   NOT_LP64(__ restore_locals());                           // restore rdi
2725 
2726   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2727   __ addptr(rbcp, j);
2728   __ dispatch_only(vtos, true);
2729 
2730   // default case -> j = default offset
2731   __ bind(default_case);
2732   __ profile_switch_default(i);
2733   __ movl(j, Address(array, -2 * BytesPerInt));
2734   __ bswapl(j);
2735   LP64_ONLY(__ movslq(j, j));
2736 
2737   NOT_LP64(__ restore_bcp());
2738   NOT_LP64(__ restore_locals());
2739 
2740   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2741   __ addptr(rbcp, j);
2742   __ dispatch_only(vtos, true);
2743 }
2744 
2745 void TemplateTable::_return(TosState state) {
2746   transition(state, state);
2747 
2748   assert(_desc->calls_vm(),
2749          "inconsistent calls_vm information"); // call in remove_activation
2750 
2751   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2752     assert(state == vtos, "only valid state");
2753     Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2754     __ movptr(robj, aaddress(0));
2755     __ load_klass(rdi, robj);
2756     __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2757     __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2758     Label skip_register_finalizer;
2759     __ jcc(Assembler::zero, skip_register_finalizer);
2760 
2761     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2762 
2763     __ bind(skip_register_finalizer);
2764   }
2765 
2766   if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
2767     Label no_safepoint;
2768     NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
2769 #ifdef _LP64
2770     __ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
2771 #else
2772     const Register thread = rdi;
2773     __ get_thread(thread);
2774     __ testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
2775 #endif
2776     __ jcc(Assembler::zero, no_safepoint);
2777     __ push(state);
2778     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2779                                     InterpreterRuntime::at_safepoint));
2780     __ pop(state);
2781     __ bind(no_safepoint);
2782   }
2783 
2784   // Narrow result if state is itos but result type is smaller.
2785   // Need to narrow in the return bytecode rather than in generate_return_entry
2786   // since compiled code callers expect the result to already be narrowed.
2787   if (state == itos) {
2788     __ narrow(rax);
2789   }
2790 
2791   __ remove_activation(state, rbcp, true, true, true);
2792 
2793   __ jmp(rbcp);
2794 }
2795 
2796 // ----------------------------------------------------------------------------
2797 // Volatile variables demand their effects be made known to all CPUs
2798 // in order.  Store buffers on most chips allow reads & writes to
2799 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2800 // without some kind of memory barrier (i.e., it's not sufficient that
2801 // the interpreter does not reorder volatile references; the hardware
2802 // also must not reorder them).
2803 //
2804 // According to the new Java Memory Model (JMM):
2805 // (1) All volatiles are serialized with respect to each other.  ALSO reads &
2806 //     writes act as acquire & release, so:
2807 // (2) A read cannot let unrelated NON-volatile memory refs that
2808 //     happen after the read float up to before the read.  It's OK for
2809 //     non-volatile memory refs that happen before the volatile read to
2810 //     float down below it.
2811 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2812 //     memory refs that happen BEFORE the write float down to after the
2813 //     write.  It's OK for non-volatile memory refs that happen after the
2814 //     volatile write to float up before it.
2815 //
2816 // We only put in barriers around volatile refs (they are expensive),
2817 // not _between_ memory refs (that would require us to track the
2818 // flavor of the previous memory refs).  Requirements (2) and (3)
2819 // require some barriers before volatile stores and after volatile
2820 // loads.  These nearly cover requirement (1) but miss the
2821 // volatile-store-volatile-load case.  This final case is placed after
2822 // volatile-stores although it could just as well go before
2823 // volatile-loads.
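//
// Illustration of the volatile-store-volatile-load case (a Java-level sketch,
// given here as a comment only):
//
//   volatile int a = 0, b = 0;
//   // Thread 1: a = 1; r1 = b;      Thread 2: b = 1; r2 = a;
//
// Without a StoreLoad barrier after each volatile store, both loads may see
// the stale zeros (r1 == r2 == 0), an outcome the JMM forbids for volatiles.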
2824 
2825 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2826   // Helper function to insert a memory barrier; callers perform the is-volatile test.
2827   __ membar(order_constraint);
2828 }
2829 
2830 void TemplateTable::resolve_cache_and_index(int byte_no,
2831                                             Register cache,
2832                                             Register index,
2833                                             size_t index_size) {
2834   const Register temp = rbx;
2835   assert_different_registers(cache, index, temp);
2836 
2837   Label L_clinit_barrier_slow;
2838   Label resolved;
2839 
2840   Bytecodes::Code code = bytecode();
2841   switch (code) {
2842   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2843   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2844   default: break;
2845   }
2846 
2847   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2848   __ get_cache_and_index_and_bytecode_at_bcp(cache, index, temp, byte_no, 1, index_size);
2849   __ cmpl(temp, code);  // have we resolved this bytecode?
2850   __ jcc(Assembler::equal, resolved);
2851 
2852   // resolve first time through
2853   // Class initialization barrier slow path lands here as well.
2854   __ bind(L_clinit_barrier_slow);
2855   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2856   __ movl(temp, code);
2857   __ call_VM(noreg, entry, temp);
2858   // Update registers with resolved info
2859   __ get_cache_and_index_at_bcp(cache, index, 1, index_size);
2860 
2861   __ bind(resolved);
2862 
2863   // Class initialization barrier for static methods
2864   if (VM_Version::supports_fast_class_init_checks() && bytecode() == Bytecodes::_invokestatic) {
2865     const Register method = temp;
2866     const Register klass  = temp;
2867     const Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
2868     assert(thread != noreg, "x86_32 not supported");
2869 
2870     __ load_resolved_method_at_index(byte_no, method, cache, index);
2871     __ load_method_holder(klass, method);
2872     __ clinit_barrier(klass, thread, NULL /*L_fast_path*/, &L_clinit_barrier_slow);
2873   }
2874 }
2875 
2876 // The cache and index registers must be set before the call
2877 void TemplateTable::load_field_cp_cache_entry(Register obj,
2878                                               Register cache,
2879                                               Register index,
2880                                               Register off,
2881                                               Register flags,
2882                                               bool is_static = false) {
2883   assert_different_registers(cache, index, flags, off);
2884 
2885   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2886   // Field offset
2887   __ movptr(off, Address(cache, index, Address::times_ptr,
2888                          in_bytes(cp_base_offset +
2889                                   ConstantPoolCacheEntry::f2_offset())));
2890   // Flags
2891   __ movl(flags, Address(cache, index, Address::times_ptr,
2892                          in_bytes(cp_base_offset +
2893                                   ConstantPoolCacheEntry::flags_offset())));
2894 
2895   // for static fields, obj is overwritten with the klass' java mirror (the static field base)
2896   if (is_static) {
2897     __ movptr(obj, Address(cache, index, Address::times_ptr,
2898                            in_bytes(cp_base_offset +
2899                                     ConstantPoolCacheEntry::f1_offset())));
2900     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2901     __ movptr(obj, Address(obj, mirror_offset));
2902     __ resolve_oop_handle(obj);
2903   }
2904 }
2905 
2906 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2907                                                Register method,
2908                                                Register itable_index,
2909                                                Register flags,
2910                                                bool is_invokevirtual,
2911                                                bool is_invokevfinal, /*unused*/
2912                                                bool is_invokedynamic) {
2913   // setup registers
2914   const Register cache = rcx;
2915   const Register index = rdx;
2916   assert_different_registers(method, flags);
2917   assert_different_registers(method, cache, index);
2918   assert_different_registers(itable_index, flags);
2919   assert_different_registers(itable_index, cache, index);
2920   // determine constant pool cache field offsets
2921   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2922   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2923                                     ConstantPoolCacheEntry::flags_offset());
2924   // access constant pool cache fields
2925   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2926                                     ConstantPoolCacheEntry::f2_offset());
2927 
2928   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2929   resolve_cache_and_index(byte_no, cache, index, index_size);
2930   __ load_resolved_method_at_index(byte_no, method, cache, index);
2931 
2932   if (itable_index != noreg) {
2933     // pick up itable or appendix index from f2 also:
2934     __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2935   }
2936   __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2937 }
2938 
2939 // The cache and index registers are expected to be set before the call.
2940 // Correct values of the cache and index registers are preserved.
2941 void TemplateTable::jvmti_post_field_access(Register cache,
2942                                             Register index,
2943                                             bool is_static,
2944                                             bool has_tos) {
2945   if (JvmtiExport::can_post_field_access()) {
2946     // Check to see if a field access watch has been set before we take
2947     // the time to call into the VM.
2948     Label L1;
2949     assert_different_registers(cache, index, rax);
2950     __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2951     __ testl(rax,rax);
2952     __ jcc(Assembler::zero, L1);
2953 
2954     // cache entry pointer
2955     __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2956     __ shll(index, LogBytesPerWord);
2957     __ addptr(cache, index);
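    // i.e. (a sketch): entry = (address)cache + base_offset + (index << LogBytesPerWord)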
2958     if (is_static) {
2959       __ xorptr(rax, rax);      // NULL object reference
2960     } else {
2961       __ pop(atos);         // Get the object
2962       __ verify_oop(rax);
2963       __ push(atos);        // Restore stack state
2964     }
2965     // rax,:   object pointer or NULL
2966     // cache: cache entry pointer
2967     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2968                rax, cache);
2969     __ get_cache_and_index_at_bcp(cache, index, 1);
2970     __ bind(L1);
2971   }
2972 }
2973 
2974 void TemplateTable::pop_and_check_object(Register r) {
2975   __ pop_ptr(r);
2976   __ null_check(r);  // for field access must check obj.
2977   __ verify_oop(r);
2978 }
2979 
2980 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2981   transition(vtos, vtos);
2982 
2983   const Register cache = rcx;
2984   const Register index = rdx;
2985   const Register obj   = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2986   const Register off   = rbx;
2987   const Register flags = rax;
2988   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
2989   const Register flags2 = rdx;
2990 
2991   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2992   jvmti_post_field_access(cache, index, is_static, false);
2993   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2994 
2995   const Address field(obj, off, Address::times_1, 0*wordSize);
2996 
2997   Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notValueType;
2998 
2999   if (!is_static) {
3000     __ movptr(rcx, Address(cache, index, Address::times_ptr,
3001                            in_bytes(ConstantPoolCache::base_offset() +
3002                                     ConstantPoolCacheEntry::f1_offset())));
3003   }
3004 
3005   __ movl(flags2, flags);
3006 
3007   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3008   // Make sure we don't need to mask flags (rax) after the above shift
3009   assert(btos == 0, "change code, btos != 0");
3010 
3011   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
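  // In C terms (a sketch): flags = (flags >> tos_state_shift) & tos_state_mask,
  // leaving the TosState value used by the dispatch chain below.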
3012 
3013   __ jcc(Assembler::notZero, notByte);
3014   // btos
3015   if (!is_static) pop_and_check_object(obj);
3016   __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3017   __ push(btos);
3018   // Rewrite bytecode to be faster
3019   if (!is_static && rc == may_rewrite) {
3020     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
3021   }
3022   __ jmp(Done);
3023 
3024   __ bind(notByte);
3025 
3026   __ cmpl(flags, ztos);
3027   __ jcc(Assembler::notEqual, notBool);
3028   if (!is_static) pop_and_check_object(obj);
3029   // ztos (same code as btos)
3030   __ access_load_at(T_BOOLEAN, IN_HEAP, rax, field, noreg, noreg);
3031   __ push(ztos);
3032   // Rewrite bytecode to be faster
3033   if (!is_static && rc == may_rewrite) {
3034     // use btos rewriting; no truncation to the t/f bit is needed for getfield.
3035     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
3036   }
3037   __ jmp(Done);
3038 
3039   __ bind(notBool);
3040   __ cmpl(flags, atos);
3041   __ jcc(Assembler::notEqual, notObj);
3042   // atos
3043   if (!EnableValhalla) {
3044     if (!is_static) pop_and_check_object(obj);
3045     do_oop_load(_masm, field, rax);
3046     __ push(atos);
3047     if (!is_static && rc == may_rewrite) {
3048       patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
3049     }
3050     __ jmp(Done);
3051   } else {
3052     if (is_static) {
3053       __ load_heap_oop(rax, field);
3054       Label isFlattenable, uninitialized;
3055       // The code below handles the case where the static field has not been initialized yet
3056       __ test_field_is_flattenable(flags2, rscratch1, isFlattenable);
3057         // Not flattenable case
3058         __ push(atos);
3059         __ jmp(Done);
3060       // Flattenable case, must not return null even if uninitialized
3061       __ bind(isFlattenable);
3062         __ testptr(rax, rax);
3063         __ jcc(Assembler::zero, uninitialized);
3064           __ push(atos);
3065           __ jmp(Done);
3066         __ bind(uninitialized);
3067           __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
3068           __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_value_field),
3069                  obj, flags2);
3070           __ verify_oop(rax);
3071           __ push(atos);
3072           __ jmp(Done);
3073     } else {
3074       Label isFlattened, nonnull, isFlattenable, rewriteFlattenable;
3075       __ test_field_is_flattenable(flags2, rscratch1, isFlattenable);
3076         // Non-flattenable field case, also covers the object case
3077         pop_and_check_object(obj);
3078         __ load_heap_oop(rax, field);
3079         __ push(atos);
3080         if (rc == may_rewrite) {
3081           patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
3082         }
3083         __ jmp(Done);
3084       __ bind(isFlattenable);
3085         __ test_field_is_flattened(flags2, rscratch1, isFlattened);
3086           // Non-flattened field case
3087           pop_and_check_object(obj);
3088           __ load_heap_oop(rax, field);
3089           __ testptr(rax, rax);
3090           __ jcc(Assembler::notZero, nonnull);
3091             __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
3092             __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field),
3093                        obj, flags2);
3094           __ bind(nonnull);
3095           __ verify_oop(rax);
3096           __ push(atos);
3097           __ jmp(rewriteFlattenable);
3098         __ bind(isFlattened);
3099           __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
3100           pop_and_check_object(rbx);
3101           call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field),
3102                   rbx, flags2, rcx);
3103           __ verify_oop(rax);
3104           __ push(atos);
3105       __ bind(rewriteFlattenable);
3106       if (rc == may_rewrite) {
3107         patch_bytecode(Bytecodes::_fast_qgetfield, bc, rbx);
3108       }
3109       __ jmp(Done);
3110     }
3111   }
3112 
3113   __ bind(notObj);
3114 
3115   if (!is_static) pop_and_check_object(obj);
3116 
3117   __ cmpl(flags, itos);
3118   __ jcc(Assembler::notEqual, notInt);
3119   // itos
3120   __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3121   __ push(itos);
3122   // Rewrite bytecode to be faster
3123   if (!is_static && rc == may_rewrite) {
3124     patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
3125   }
3126   __ jmp(Done);
3127 
3128   __ bind(notInt);
3129   __ cmpl(flags, ctos);
3130   __ jcc(Assembler::notEqual, notChar);
3131   // ctos
3132   __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3133   __ push(ctos);
3134   // Rewrite bytecode to be faster
3135   if (!is_static && rc == may_rewrite) {
3136     patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
3137   }
3138   __ jmp(Done);
3139 
3140   __ bind(notChar);
3141   __ cmpl(flags, stos);
3142   __ jcc(Assembler::notEqual, notShort);
3143   // stos
3144   __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3145   __ push(stos);
3146   // Rewrite bytecode to be faster
3147   if (!is_static && rc == may_rewrite) {
3148     patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
3149   }
3150   __ jmp(Done);
3151 
3152   __ bind(notShort);
3153   __ cmpl(flags, ltos);
3154   __ jcc(Assembler::notEqual, notLong);
3155   // ltos
3156   // Generate code as if volatile (x86_32): there aren't enough registers to
3157   // track the volatility flag, and the atomic load is faster than the test.
3158   __ access_load_at(T_LONG, IN_HEAP | MO_RELAXED, noreg /* ltos */, field, noreg, noreg);
3159   __ push(ltos);
3160   // Rewrite bytecode to be faster
3161   LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
3162   __ jmp(Done);
3163 
3164   __ bind(notLong);
3165   __ cmpl(flags, ftos);
3166   __ jcc(Assembler::notEqual, notFloat);
3167   // ftos
3169   __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3170   __ push(ftos);
3171   // Rewrite bytecode to be faster
3172   if (!is_static && rc == may_rewrite) {
3173     patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
3174   }
3175   __ jmp(Done);
3176 
3177   __ bind(notFloat);
3178 #ifdef ASSERT
3179   Label notDouble;
3180   __ cmpl(flags, dtos);
3181   __ jcc(Assembler::notEqual, notDouble);
3182 #endif
3183   // dtos
3184   // MO_RELAXED: even for a volatile field this adds no extra work in the underlying implementation
3185   __ access_load_at(T_DOUBLE, IN_HEAP | MO_RELAXED, noreg /* dtos */, field, noreg, noreg);
3186   __ push(dtos);
3187   // Rewrite bytecode to be faster
3188   if (!is_static && rc == may_rewrite) {
3189     patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
3190   }
3191 #ifdef ASSERT
3192   __ jmp(Done);
3193 
3194   __ bind(notDouble);
3195   __ stop("Bad state");
3196 #endif
3197 
3198   __ bind(Done);
3199   // [jk] not needed currently
3200   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
3201   //                                              Assembler::LoadStore));
3202 }
3203 
3204 void TemplateTable::getfield(int byte_no) {
3205   getfield_or_static(byte_no, false);
3206 }
3207 
3208 void TemplateTable::nofast_getfield(int byte_no) {
3209   getfield_or_static(byte_no, false, may_not_rewrite);
3210 }
3211 
3212 void TemplateTable::getstatic(int byte_no) {
3213   getfield_or_static(byte_no, true);
3214 }
3215 
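     // withfield (Valhalla): produce a new value instance that is a copy of the
     // original with one field replaced. The runtime call does the work; it
     // returns the new value in rbx and the number of bytes to pop from the
     // expression stack in rax (the popped size depends on the field type).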
3216 void TemplateTable::withfield() {
3217   transition(vtos, atos);
3218 
3219   Register cache = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
3220   Register index = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
3221 
3222   resolve_cache_and_index(f2_byte, cache, index, sizeof(u2));
3223 
3224   call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::withfield), cache);
3225   // new value type is returned in rbx
3226   // stack adjustment is returned in rax
3227   __ verify_oop(rbx);
3228   __ addptr(rsp, rax);
3229   __ movptr(rax, rbx);
3230 }
3231 
3232 // The registers cache and index are expected to be set before the call.
3233 // The function may destroy various registers, but not the cache and index registers.
3234 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
3235 
3236   const Register robj = LP64_ONLY(c_rarg2)   NOT_LP64(rax);
3237   const Register RBX  = LP64_ONLY(c_rarg1)   NOT_LP64(rbx);
3238   const Register RCX  = LP64_ONLY(c_rarg3)   NOT_LP64(rcx);
3239   const Register RDX  = LP64_ONLY(rscratch1) NOT_LP64(rdx);
3240 
3241   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3242 
3243   if (JvmtiExport::can_post_field_modification()) {
3244     // Check to see if a field modification watch has been set before
3245     // we take the time to call into the VM.
3246     Label L1;
3247     assert_different_registers(cache, index, rax);
3248     __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3249     __ testl(rax, rax);
3250     __ jcc(Assembler::zero, L1);
3251 
3252     __ get_cache_and_index_at_bcp(robj, RDX, 1);
3253 
3255     if (is_static) {
3256       // Life is simple.  Null out the object pointer.
3257       __ xorl(RBX, RBX);
3258 
3259     } else {
3260       // Life is harder. The stack holds the value on top, followed by
3261       // the object.  We don't know the size of the value, though; it
3262       // could be one or two words depending on its type. As a result,
3263       // we must find the type to determine where the object is.
3264 #ifndef _LP64
3265       Label two_word, valsize_known;
3266 #endif
3267       __ movl(RCX, Address(robj, RDX,
3268                            Address::times_ptr,
3269                            in_bytes(cp_base_offset +
3270                                      ConstantPoolCacheEntry::flags_offset())));
3271       NOT_LP64(__ mov(rbx, rsp));
3272       __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);
3273 
3274       // Make sure we don't need to mask rcx after the above shift
3275       ConstantPoolCacheEntry::verify_tos_state_shift();
3276 #ifdef _LP64
3277       __ movptr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
3278       __ cmpl(c_rarg3, ltos);
3279       __ cmovptr(Assembler::equal,
3280                  c_rarg1, at_tos_p2()); // ltos (two word jvalue)
3281       __ cmpl(c_rarg3, dtos);
3282       __ cmovptr(Assembler::equal,
3283                  c_rarg1, at_tos_p2()); // dtos (two word jvalue)
3284 #else
3285       __ cmpl(rcx, ltos);
3286       __ jccb(Assembler::equal, two_word);
3287       __ cmpl(rcx, dtos);
3288       __ jccb(Assembler::equal, two_word);
3289       __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
3290       __ jmpb(valsize_known);
3291 
3292       __ bind(two_word);
3293       __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
3294 
3295       __ bind(valsize_known);
3296       // setup object pointer
3297       __ movptr(rbx, Address(rbx, 0));
3298 #endif
3299     }
3300     // cache entry pointer
3301     __ addptr(robj, in_bytes(cp_base_offset));
3302     __ shll(RDX, LogBytesPerWord);
3303     __ addptr(robj, RDX);
3304     // object (tos)
3305     __ mov(RCX, rsp);
3306     // RBX  (c_rarg1 on LP64): object pointer set up above (NULL if static)
3307     // robj (c_rarg2 on LP64): cache entry pointer
3308     // RCX  (c_rarg3 on LP64): jvalue object on the stack
3309     __ call_VM(noreg,
3310                CAST_FROM_FN_PTR(address,
3311                                 InterpreterRuntime::post_field_modification),
3312                RBX, robj, RCX);
3313     __ get_cache_and_index_at_bcp(cache, index, 1);
3314     __ bind(L1);
3315   }
3316 }
3317 
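     // Store a field (putfield/putstatic). The store code is generated twice by
     // putfield_or_static_helper below: the volatile path is followed by a
     // StoreLoad|StoreStore barrier, the non-volatile path is not.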
3318 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3319   transition(vtos, vtos);
3320 
3321   const Register cache = rcx;
3322   const Register index = rdx;
3323   const Register obj   = rcx;
3324   const Register off   = rbx;
3325   const Register flags = rax;
3326   const Register flags2 = rdx;
3327 
3328   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
3329   jvmti_post_field_mod(cache, index, is_static);
3330   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
3331 
3332   // [jk] not needed currently
3333   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3334   //                                              Assembler::StoreStore));
3335 
3336   Label notVolatile, Done;
3337   __ movl(rdx, flags);
3338   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3339   __ andl(rdx, 0x1);
3340 
3341   // Check for volatile store
3342   __ testl(rdx, rdx);
3343   __ movl(flags2, flags);
3344   __ jcc(Assembler::zero, notVolatile);
3345 
3346   putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags, flags2);
3347   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3348                                                Assembler::StoreStore));
3349   __ jmp(Done);
3350   __ bind(notVolatile);
3351 
3352   putfield_or_static_helper(byte_no, is_static, rc, obj, off, flags, flags2);
3353 
3354   __ bind(Done);
3355 }
3356 
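     // Dispatch on the tos state held in flags: compare against btos, ztos, atos,
     // itos, ... in turn, perform the matching access_store_at(), and, when
     // rewriting is allowed, patch the bytecode to its _fast_xputfield form.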
3357 void TemplateTable::putfield_or_static_helper(int byte_no, bool is_static, RewriteControl rc,
3358                                               Register obj, Register off, Register flags, Register flags2) {
3359 
3360   // field addresses
3361   const Address field(obj, off, Address::times_1, 0*wordSize);
3362   NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3363 
3364   Label notByte, notBool, notInt, notShort, notChar,
3365         notLong, notFloat, notObj;
3366   Label Done;
3367 
3368   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3369 
3370   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3371 
3372   assert(btos == 0, "change code, btos != 0");
3373   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
3374   __ jcc(Assembler::notZero, notByte);
3375 
3376   // btos
3377   {
3378     __ pop(btos);
3379     if (!is_static) pop_and_check_object(obj);
3380     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
3381     if (!is_static && rc == may_rewrite) {
3382       patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3383     }
3384     __ jmp(Done);
3385   }
3386 
3387   __ bind(notByte);
3388   __ cmpl(flags, ztos);
3389   __ jcc(Assembler::notEqual, notBool);
3390 
3391   // ztos
3392   {
3393     __ pop(ztos);
3394     if (!is_static) pop_and_check_object(obj);
3395     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
3396     if (!is_static && rc == may_rewrite) {
3397       patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
3398     }
3399     __ jmp(Done);
3400   }
3401 
3402   __ bind(notBool);
3403   __ cmpl(flags, atos);
3404   __ jcc(Assembler::notEqual, notObj);
3405 
3406   // atos
3407   {
3408     if (!EnableValhalla) {
3409       __ pop(atos);
3410       if (!is_static) pop_and_check_object(obj);
3411       // Store into the field
3412       do_oop_store(_masm, field, rax);
3413       if (!is_static && rc == may_rewrite) {
3414         patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3415       }
3416       __ jmp(Done);
3417     } else {
3418       __ pop(atos);
3419       if (is_static) {
3420         Label notFlattenable;
3421         __ test_field_is_not_flattenable(flags2, rscratch1, notFlattenable);
3422         __ null_check(rax);
3423         __ bind(notFlattenable);
3424         do_oop_store(_masm, field, rax);
3425         __ jmp(Done);
3426       } else {
3427         Label isFlattenable, isFlattened, rewriteNotFlattenable, rewriteFlattenable;
3428         __ test_field_is_flattenable(flags2, rscratch1, isFlattenable);
3429         // Not flattenable case, covers not flattenable values and objects
3430         pop_and_check_object(obj);
3431         // Store into the field
3432         do_oop_store(_masm, field, rax);
3433         __ bind(rewriteNotFlattenable);
3434         if (rc == may_rewrite) {
3435           patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3436         }
3437         __ jmp(Done);
3438         // Implementation of the flattenable semantic
3439         __ bind(isFlattenable);
3440         __ null_check(rax);
3441         __ test_field_is_flattened(flags2, rscratch1, isFlattened);
3442         // Not flattened case
3443         pop_and_check_object(obj);
3444         // Store into the field
3445         do_oop_store(_masm, field, rax);
3446         __ jmp(rewriteFlattenable);
3447         __ bind(isFlattened);
3448         pop_and_check_object(obj);
3449         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value),
3450                 rax, off, obj);
3451         __ bind(rewriteFlattenable);
3452         if (rc == may_rewrite) {
3453           patch_bytecode(Bytecodes::_fast_qputfield, bc, rbx, true, byte_no);
3454         }
3455         __ jmp(Done);
3456       }
3457     }
3458   }
3459 
3460   __ bind(notObj);
3461   __ cmpl(flags, itos);
3462   __ jcc(Assembler::notEqual, notInt);
3463 
3464   // itos
3465   {
3466     __ pop(itos);
3467     if (!is_static) pop_and_check_object(obj);
3468     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
3469     if (!is_static && rc == may_rewrite) {
3470       patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3471     }
3472     __ jmp(Done);
3473   }
3474 
3475   __ bind(notInt);
3476   __ cmpl(flags, ctos);
3477   __ jcc(Assembler::notEqual, notChar);
3478 
3479   // ctos
3480   {
3481     __ pop(ctos);
3482     if (!is_static) pop_and_check_object(obj);
3483     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
3484     if (!is_static && rc == may_rewrite) {
3485       patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3486     }
3487     __ jmp(Done);
3488   }
3489 
3490   __ bind(notChar);
3491   __ cmpl(flags, stos);
3492   __ jcc(Assembler::notEqual, notShort);
3493 
3494   // stos
3495   {
3496     __ pop(stos);
3497     if (!is_static) pop_and_check_object(obj);
3498     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
3499     if (!is_static && rc == may_rewrite) {
3500       patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3501     }
3502     __ jmp(Done);
3503   }
3504 
3505   __ bind(notShort);
3506   __ cmpl(flags, ltos);
3507   __ jcc(Assembler::notEqual, notLong);
3508 
3509   // ltos
3510   {
3511     __ pop(ltos);
3512     if (!is_static) pop_and_check_object(obj);
3513     // MO_RELAXED: generate atomic store for the case of volatile field (important for x86_32)
3514     __ access_store_at(T_LONG, IN_HEAP | MO_RELAXED, field, noreg /* ltos*/, noreg, noreg);
3515 #ifdef _LP64
3516     if (!is_static && rc == may_rewrite) {
3517       patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3518     }
3519 #endif // _LP64
3520     __ jmp(Done);
3521   }
3522 
3523   __ bind(notLong);
3524   __ cmpl(flags, ftos);
3525   __ jcc(Assembler::notEqual, notFloat);
3526 
3527   // ftos
3528   {
3529     __ pop(ftos);
3530     if (!is_static) pop_and_check_object(obj);
3531     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos */, noreg, noreg);
3532     if (!is_static && rc == may_rewrite) {
3533       patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3534     }
3535     __ jmp(Done);
3536   }
3537 
3538   __ bind(notFloat);
3539 #ifdef ASSERT
3540   Label notDouble;
3541   __ cmpl(flags, dtos);
3542   __ jcc(Assembler::notEqual, notDouble);
3543 #endif
3544 
3545   // dtos
3546   {
3547     __ pop(dtos);
3548     if (!is_static) pop_and_check_object(obj);
3549     // MO_RELAXED: even for a volatile field this adds no extra work in the underlying implementation
3550     __ access_store_at(T_DOUBLE, IN_HEAP | MO_RELAXED, field, noreg /* dtos */, noreg, noreg);
3551     if (!is_static && rc == may_rewrite) {
3552       patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3553     }
3554   }
3555 
3556 #ifdef ASSERT
3557   __ jmp(Done);
3558 
3559   __ bind(notDouble);
3560   __ stop("Bad state");
3561 #endif
3562 
3563   __ bind(Done);
3564 }
3565 
3566 void TemplateTable::putfield(int byte_no) {
3567   putfield_or_static(byte_no, false);
3568 }
3569 
3570 void TemplateTable::nofast_putfield(int byte_no) {
3571   putfield_or_static(byte_no, false, may_not_rewrite);
3572 }
3573 
3574 void TemplateTable::putstatic(int byte_no) {
3575   putfield_or_static(byte_no, true);
3576 }
3577 
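     // JVMTI support for the rewritten _fast_xputfield bytecodes: if a field
     // modification watch is set, rebuild a jvalue from the tos value on the
     // stack and report the pending store to the VM before it happens.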
3578 void TemplateTable::jvmti_post_fast_field_mod() {
3579 
3580   const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3581 
3582   if (JvmtiExport::can_post_field_modification()) {
3583     // Check to see if a field modification watch has been set before
3584     // we take the time to call into the VM.
3585     Label L2;
3586     __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3587     __ testl(scratch, scratch);
3588     __ jcc(Assembler::zero, L2);
3589     __ pop_ptr(rbx);                  // copy the object pointer from tos
3590     __ verify_oop(rbx);
3591     __ push_ptr(rbx);                 // put the object pointer back on tos
3592     // Save tos values before call_VM() clobbers them. Since we have
3593     // to do it for every data type, we use the saved values as the
3594     // jvalue object.
3595     switch (bytecode()) {          // load values into the jvalue object
3596     case Bytecodes::_fast_qputfield: //fall through
3597     case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3598     case Bytecodes::_fast_bputfield: // fall through
3599     case Bytecodes::_fast_zputfield: // fall through
3600     case Bytecodes::_fast_sputfield: // fall through
3601     case Bytecodes::_fast_cputfield: // fall through
3602     case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3603     case Bytecodes::_fast_dputfield: __ push(dtos); break;
3604     case Bytecodes::_fast_fputfield: __ push(ftos); break;
3605     case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3606 
3607     default:
3608       ShouldNotReachHere();
3609     }
3610     __ mov(scratch, rsp);             // points to jvalue on the stack
3611     // access constant pool cache entry
3612     LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1));
3613     NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1));
3614     __ verify_oop(rbx);
3615     // rbx: object pointer copied above
3616     // c_rarg2: cache entry pointer
3617     // c_rarg3: jvalue object on the stack
3618     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3619     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3620 
3621     switch (bytecode()) {             // restore tos values
3622     case Bytecodes::_fast_qputfield: // fall through
3623     case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3624     case Bytecodes::_fast_bputfield: // fall through
3625     case Bytecodes::_fast_zputfield: // fall through
3626     case Bytecodes::_fast_sputfield: // fall through
3627     case Bytecodes::_fast_cputfield: // fall through
3628     case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3629     case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3630     case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3631     case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3632     default: break;
3633     }
3634     __ bind(L2);
3635   }
3636 }
3637 
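     // Fast path for rewritten putfield bytecodes. The constant pool cache entry
     // was resolved when the original bytecode was rewritten, so only the flags
     // (for the volatile check) and the field offset need to be fetched here.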
3638 void TemplateTable::fast_storefield(TosState state) {
3639   transition(state, vtos);
3640 
3641   ByteSize base = ConstantPoolCache::base_offset();
3642 
3643   jvmti_post_fast_field_mod();
3644 
3645   // access constant pool cache
3646   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3647 
3648   // Test for volatile with rdx; note that rdx is the tos register for lputfield on x86_32.
3649   __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3650                        in_bytes(base +
3651                                 ConstantPoolCacheEntry::flags_offset())));
3652 
3653   // replace index with field offset from cache entry
3654   __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3655                          in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3656 
3657   // [jk] not needed currently
3658   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3659   //                                              Assembler::StoreStore));
3660 
3661   Label notVolatile, Done;
3662   if (bytecode() == Bytecodes::_fast_qputfield) {
3663     __ movl(rscratch2, rdx);
3664   }
3665 
3666   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3667   __ andl(rdx, 0x1);
3668 
3669   // Get object from stack
3670   pop_and_check_object(rcx);
3671 
3672   // field address
3673   const Address field(rcx, rbx, Address::times_1);
3674 
3675   // Check for volatile store
3676   __ testl(rdx, rdx);
3677   __ jcc(Assembler::zero, notVolatile);
3678 
3679   fast_storefield_helper(field, rax);
3680   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3681                                                Assembler::StoreStore));
3682   __ jmp(Done);
3683   __ bind(notVolatile);
3684 
3685   fast_storefield_helper(field, rax);
3686 
3687   __ bind(Done);
3688 }
3689 
3690 void TemplateTable::fast_storefield_helper(Address field, Register rax) {
3691 
3692   // access field
3693   switch (bytecode()) {
3694   case Bytecodes::_fast_qputfield:
3695     {
3696       Label isFlattened, done;
3697       __ null_check(rax);
3698       __ test_field_is_flattened(rscratch2, rscratch1, isFlattened);
3699       // Non-flattened case
3700       do_oop_store(_masm, field, rax);
3701       __ jmp(done);
3702       __ bind(isFlattened);
3703       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value),
3704           rax, rbx, rcx);
3705       __ bind(done);
3706     }
3707     break;
3708   case Bytecodes::_fast_aputfield:
3709     {
3710       do_oop_store(_masm, field, rax);
3711     }
3712     break;
3713   case Bytecodes::_fast_lputfield:
3714 #ifdef _LP64
3715     __ access_store_at(T_LONG, IN_HEAP, field, noreg /* ltos */, noreg, noreg);
3716 #else
3717     __ stop("should not be rewritten");
3718 #endif
3719     break;
3720   case Bytecodes::_fast_iputfield:
3721     __ access_store_at(T_INT, IN_HEAP, field, rax, noreg, noreg);
3722     break;
3723   case Bytecodes::_fast_zputfield:
3724     __ access_store_at(T_BOOLEAN, IN_HEAP, field, rax, noreg, noreg);
3725     break;
3726   case Bytecodes::_fast_bputfield:
3727     __ access_store_at(T_BYTE, IN_HEAP, field, rax, noreg, noreg);
3728     break;
3729   case Bytecodes::_fast_sputfield:
3730     __ access_store_at(T_SHORT, IN_HEAP, field, rax, noreg, noreg);
3731     break;
3732   case Bytecodes::_fast_cputfield:
3733     __ access_store_at(T_CHAR, IN_HEAP, field, rax, noreg, noreg);
3734     break;
3735   case Bytecodes::_fast_fputfield:
3736     __ access_store_at(T_FLOAT, IN_HEAP, field, noreg /* ftos*/, noreg, noreg);
3737     break;
3738   case Bytecodes::_fast_dputfield:
3739     __ access_store_at(T_DOUBLE, IN_HEAP, field, noreg /* dtos*/, noreg, noreg);
3740     break;
3741   default:
3742     ShouldNotReachHere();
3743   }
3744 }
3745 
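     // Fast path for rewritten getfield bytecodes: the receiver is at tos and the
     // cache entry is already resolved, so load the field offset and perform the
     // type-specific load directly.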
3746 void TemplateTable::fast_accessfield(TosState state) {
3747   transition(atos, state);
3748 
3749   // Do the JVMTI work here to avoid disturbing the register state below
3750   if (JvmtiExport::can_post_field_access()) {
3751     // Check to see if a field access watch has been set before we
3752     // take the time to call into the VM.
3753     Label L1;
3754     __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3755     __ testl(rcx, rcx);
3756     __ jcc(Assembler::zero, L1);
3757     // access constant pool cache entry
3758     LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1));
3759     NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1));
3760     __ verify_oop(rax);
3761     __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
3762     LP64_ONLY(__ mov(c_rarg1, rax));
3763     // c_rarg1: object pointer copied above
3764     // c_rarg2: cache entry pointer
3765     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
3766     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
3767     __ pop_ptr(rax); // restore object pointer
3768     __ bind(L1);
3769   }
3770 
3771   // access constant pool cache
3772   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3773   // replace index with field offset from cache entry
3774   // [jk] not needed currently
3775   // __ movl(rdx, Address(rcx, rbx, Address::times_8,
3776   //                      in_bytes(ConstantPoolCache::base_offset() +
3777   //                               ConstantPoolCacheEntry::flags_offset())));
3778   // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3779   // __ andl(rdx, 0x1);
3780   //
3781   __ movptr(rdx, Address(rcx, rbx, Address::times_ptr,
3782                          in_bytes(ConstantPoolCache::base_offset() +
3783                                   ConstantPoolCacheEntry::f2_offset())));
3784 
3785   // rax: object
3786   __ verify_oop(rax);
3787   __ null_check(rax);
3788   Address field(rax, rdx, Address::times_1);
3789 
3790   // access field
3791   switch (bytecode()) {
3792   case Bytecodes::_fast_qgetfield:
3793     {
3794       Label isFlattened, nonnull, Done;
3795       __ movptr(rscratch1, Address(rcx, rbx, Address::times_ptr,
3796                                    in_bytes(ConstantPoolCache::base_offset() +
3797                                             ConstantPoolCacheEntry::flags_offset())));
3798       __ test_field_is_flattened(rscratch1, rscratch2, isFlattened);
3799         // Non-flattened field case
3800         __ movptr(rscratch1, rax);
3801         __ load_heap_oop(rax, field);
3802         __ testptr(rax, rax);
3803         __ jcc(Assembler::notZero, nonnull);
3804           __ movptr(rax, rscratch1);
3805           __ movl(rcx, Address(rcx, rbx, Address::times_ptr,
3806                              in_bytes(ConstantPoolCache::base_offset() +
3807                                       ConstantPoolCacheEntry::flags_offset())));
3808           __ andl(rcx, ConstantPoolCacheEntry::field_index_mask);
3809           __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field),
3810                      rax, rcx);
3811         __ bind(nonnull);
3812         __ verify_oop(rax);
3813         __ jmp(Done);
3814       __ bind(isFlattened);
3815         __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3816                            in_bytes(ConstantPoolCache::base_offset() +
3817                                     ConstantPoolCacheEntry::flags_offset())));
3818         __ andl(rdx, ConstantPoolCacheEntry::field_index_mask);
3819         __ movptr(rcx, Address(rcx, rbx, Address::times_ptr,
3820                                      in_bytes(ConstantPoolCache::base_offset() +
3821                                               ConstantPoolCacheEntry::f1_offset())));
3822         call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field),
3823                 rax, rdx, rcx);
3824         __ verify_oop(rax);
3825       __ bind(Done);
3826     }
3827     break;
3828   case Bytecodes::_fast_agetfield:
3829     do_oop_load(_masm, field, rax);
3830     __ verify_oop(rax);
3831     break;
3832   case Bytecodes::_fast_lgetfield:
3833 #ifdef _LP64
3834     __ access_load_at(T_LONG, IN_HEAP, noreg /* ltos */, field, noreg, noreg);
3835 #else
3836     __ stop("should not be rewritten");
3837 #endif
3838     break;
3839   case Bytecodes::_fast_igetfield:
3840     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3841     break;
3842   case Bytecodes::_fast_bgetfield:
3843     __ access_load_at(T_BYTE, IN_HEAP, rax, field, noreg, noreg);
3844     break;
3845   case Bytecodes::_fast_sgetfield:
3846     __ access_load_at(T_SHORT, IN_HEAP, rax, field, noreg, noreg);
3847     break;
3848   case Bytecodes::_fast_cgetfield:
3849     __ access_load_at(T_CHAR, IN_HEAP, rax, field, noreg, noreg);
3850     break;
3851   case Bytecodes::_fast_fgetfield:
3852     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3853     break;
3854   case Bytecodes::_fast_dgetfield:
3855     __ access_load_at(T_DOUBLE, IN_HEAP, noreg /* dtos */, field, noreg, noreg);
3856     break;
3857   default:
3858     ShouldNotReachHere();
3859   }
3860   // [jk] not needed currently
3861   //   Label notVolatile;
3862   //   __ testl(rdx, rdx);
3863   //   __ jcc(Assembler::zero, notVolatile);
3864   //   __ membar(Assembler::LoadLoad);
3865   //   __ bind(notVolatile);
3866 }
3867 
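     // fast_xaccess implements the fused aload_0 + fast getfield bytecodes
     // (e.g. _fast_iaccess_0): the index is read at bcp offset 2 and rbcp is
     // temporarily incremented so a fault is attributed to the getfield part.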
3868 void TemplateTable::fast_xaccess(TosState state) {
3869   transition(vtos, state);
3870 
3871   // get receiver
3872   __ movptr(rax, aaddress(0));
3873   // access constant pool cache
3874   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
3875   __ movptr(rbx,
3876             Address(rcx, rdx, Address::times_ptr,
3877                     in_bytes(ConstantPoolCache::base_offset() +
3878                              ConstantPoolCacheEntry::f2_offset())));
3879   // make sure exception is reported in correct bcp range (getfield is
3880   // next instruction)
3881   __ increment(rbcp);
3882   __ null_check(rax);
3883   const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
3884   switch (state) {
3885   case itos:
3886     __ access_load_at(T_INT, IN_HEAP, rax, field, noreg, noreg);
3887     break;
3888   case atos:
3889     do_oop_load(_masm, field, rax);
3890     __ verify_oop(rax);
3891     break;
3892   case ftos:
3893     __ access_load_at(T_FLOAT, IN_HEAP, noreg /* ftos */, field, noreg, noreg);
3894     break;
3895   default:
3896     ShouldNotReachHere();
3897   }
3898 
3899   // [jk] not needed currently
3900   // Label notVolatile;
3901   // __ movl(rdx, Address(rcx, rdx, Address::times_8,
3902   //                      in_bytes(ConstantPoolCache::base_offset() +
3903   //                               ConstantPoolCacheEntry::flags_offset())));
3904   // __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3905   // __ testl(rdx, 0x1);
3906   // __ jcc(Assembler::zero, notVolatile);
3907   // __ membar(Assembler::LoadLoad);
3908   // __ bind(notVolatile);
3909 
3910   __ decrement(rbcp);
3911 }
3912 
3913 //-----------------------------------------------------------------------------
3914 // Calls
3915 
3916 void TemplateTable::count_calls(Register method, Register temp) {
3917   // implemented elsewhere
3918   ShouldNotReachHere();
3919 }
3920 
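     // Common setup for the invoke bytecodes: resolve the constant pool cache
     // entry, push the appendix argument for invokedynamic/invokehandle when
     // present, load the receiver if the caller expects one, and push the return
     // entry address selected by the call site's result tos state.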
3921 void TemplateTable::prepare_invoke(int byte_no,
3922                                    Register method,  // linked method (or i-klass)
3923                                    Register index,   // itable index, MethodType, etc.
3924                                    Register recv,    // if caller wants to see it
3925                                    Register flags    // if caller wants to test it
3926                                    ) {
3927   // determine flags
3928   const Bytecodes::Code code = bytecode();
3929   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3930   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3931   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3932   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3933   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3934   const bool load_receiver       = (recv  != noreg);
3935   const bool save_flags          = (flags != noreg);
3936   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3937   assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3938   assert(flags == noreg || flags == rdx, "");
3939   assert(recv  == noreg || recv  == rcx, "");
3940 
3941   // setup registers & access constant pool cache
3942   if (recv  == noreg)  recv  = rcx;
3943   if (flags == noreg)  flags = rdx;
3944   assert_different_registers(method, index, recv, flags);
3945 
3946   // save 'interpreter return address'
3947   __ save_bcp();
3948 
3949   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3950 
3951   // maybe push appendix to arguments (just before return address)
3952   if (is_invokedynamic || is_invokehandle) {
3953     Label L_no_push;
3954     __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
3955     __ jcc(Assembler::zero, L_no_push);
3956     // Push the appendix as a trailing parameter.
3957     // This must be done before we get the receiver,
3958     // since the parameter_size includes it.
3959     __ push(rbx);
3960     __ mov(rbx, index);
3961     __ load_resolved_reference_at_index(index, rbx);
3962     __ pop(rbx);
3963     __ push(index);  // push appendix (MethodType, CallSite, etc.)
3964     __ bind(L_no_push);
3965   }
3966 
3967   // load receiver if needed (after appendix is pushed so parameter size is correct)
3968   // Note: no return address pushed yet
3969   if (load_receiver) {
3970     __ movl(recv, flags);
3971     __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
3972     const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
3973     const int receiver_is_at_end      = -1;  // back off one slot to get receiver
3974     Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
3975     __ movptr(recv, recv_addr);
3976     __ verify_oop(recv);
3977   }
3978 
3979   if (save_flags) {
3980     __ movl(rbcp, flags);
3981   }
3982 
3983   // compute return type
3984   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3985   // Make sure we don't need to mask flags after the above shift
3986   ConstantPoolCacheEntry::verify_tos_state_shift();
3987   // load return address
3988   {
3989     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3990     ExternalAddress table(table_addr);
3991     LP64_ONLY(__ lea(rscratch1, table));
3992     LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr)));
3993     NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))));
3994   }
3995 
3996   // push return address
3997   __ push(flags);
3998 
3999   // Restore flags value from the constant pool cache, and restore the
4000   // bytecode pointer (rsi on x86_32, r13 on x86_64) for later null checks.
4001   if (save_flags) {
4002     __ movl(flags, rbcp);
4003     __ restore_bcp();
4004   }
4005 }
4006 
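     // Virtual dispatch: if the cache entry is vfinal, f2 is the Method* itself
     // and the call is direct; otherwise f2 is a vtable index and the target is
     // looked up in the receiver's vtable.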
4007 void TemplateTable::invokevirtual_helper(Register index,
4008                                          Register recv,
4009                                          Register flags) {
4010   // Uses temporary registers rax, rdx
4011   assert_different_registers(index, recv, rax, rdx);
4012   assert(index == rbx, "");
4013   assert(recv  == rcx, "");
4014 
4015   // Test for an invoke of a final method
4016   Label notFinal;
4017   __ movl(rax, flags);
4018   __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
4019   __ jcc(Assembler::zero, notFinal);
4020 
4021   const Register method = index;  // method must be rbx
4022   assert(method == rbx,
4023          "Method* must be rbx for interpreter calling convention");
4024 
4025   // do the call - the index is actually the method to call
4026   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
4027 
4028   // It's final, need a null check here!
4029   __ null_check(recv);
4030 
4031   // profile this call
4032   __ profile_final_call(rax);
4033   __ profile_arguments_type(rax, method, rbcp, true);
4034 
4035   __ jump_from_interpreted(method, rax);
4036 
4037   __ bind(notFinal);
4038 
4039   // get receiver klass
4040   __ null_check(recv, oopDesc::klass_offset_in_bytes());
4041   __ load_klass(rax, recv);
4042 
4043   // profile this call
4044   __ profile_virtual_call(rax, rlocals, rdx);
4045   // get target Method* & entry point
4046   __ lookup_virtual_method(rax, index, method);
4047   __ profile_called_method(method, rdx, rbcp);
4048 
4049   __ profile_arguments_type(rdx, method, rbcp, true);
4050   __ jump_from_interpreted(method, rdx);
4051 }
4052 
4053 void TemplateTable::invokevirtual(int byte_no) {
4054   transition(vtos, vtos);
4055   assert(byte_no == f2_byte, "use this argument");
4056   prepare_invoke(byte_no,
4057                  rbx,    // method or vtable index
4058                  noreg,  // unused itable index
4059                  rcx, rdx); // recv, flags
4060 
4061   // rbx: index
4062   // rcx: receiver
4063   // rdx: flags
4064 
4065   invokevirtual_helper(rbx, rcx, rdx);
4066 }
4067 
4068 void TemplateTable::invokespecial(int byte_no) {
4069   transition(vtos, vtos);
4070   assert(byte_no == f1_byte, "use this argument");
4071   prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
4072                  rcx);  // get receiver also for null check
4073   __ verify_oop(rcx);
4074   __ null_check(rcx);
4075   // do the call
4076   __ profile_call(rax);
4077   __ profile_arguments_type(rax, rbx, rbcp, false);
4078   __ jump_from_interpreted(rbx, rax);
4079 }
4080 
4081 void TemplateTable::invokestatic(int byte_no) {
4082   transition(vtos, vtos);
4083   assert(byte_no == f1_byte, "use this argument");
4084   prepare_invoke(byte_no, rbx);  // get f1 Method*
4085   // do the call
4086   __ profile_call(rax);
4087   __ profile_arguments_type(rax, rbx, rbcp, false);
4088   __ jump_from_interpreted(rbx, rax);
4089 }
4090 
4091 
4092 void TemplateTable::fast_invokevfinal(int byte_no) {
4093   transition(vtos, vtos);
4094   assert(byte_no == f2_byte, "use this argument");
4095   __ stop("fast_invokevfinal not used on x86");
4096 }
4097 
4098 
4099 void TemplateTable::invokeinterface(int byte_no) {
4100   transition(vtos, vtos);
4101   assert(byte_no == f1_byte, "use this argument");
4102   prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 Method*
4103                  rcx, rdx); // recv, flags
4104 
4105   // rax: reference klass (from f1) if interface method
4106   // rbx: method (from f2)
4107   // rcx: receiver
4108   // rdx: flags
4109 
4110   // First check for Object case, then private interface method,
4111   // then regular interface method.
4112 
4113   // Special case of invokeinterface called for virtual method of
4114   // java.lang.Object.  See cpCache.cpp for details.
4115   Label notObjectMethod;
4116   __ movl(rlocals, rdx);
4117   __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
4118   __ jcc(Assembler::zero, notObjectMethod);
4119   invokevirtual_helper(rbx, rcx, rdx);
4120   // no return from above
4121   __ bind(notObjectMethod);
4122 
4123   Label no_such_interface; // for receiver subtype check
4124   Register recvKlass; // used for exception processing
4125 
4126   // Check for private method invocation - indicated by vfinal
4127   Label notVFinal;
4128   __ movl(rlocals, rdx);
4129   __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
4130   __ jcc(Assembler::zero, notVFinal);
4131 
4132   // Get receiver klass into rlocals - also a null check
4133   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
4134   __ load_klass(rlocals, rcx);
4135 
4136   Label subtype;
4137   __ check_klass_subtype(rlocals, rax, rbcp, subtype);
4138   // If we get here the typecheck failed
4139   recvKlass = rdx;
4140   __ mov(recvKlass, rlocals); // shuffle receiver class for exception use
4141   __ jmp(no_such_interface);
4142 
4143   __ bind(subtype);
4144 
4145   // do the call - rbx is actually the method to call
4146 
4147   __ profile_final_call(rdx);
4148   __ profile_arguments_type(rdx, rbx, rbcp, true);
4149 
4150   __ jump_from_interpreted(rbx, rdx);
4151   // no return from above
4152   __ bind(notVFinal);
4153 
4154   // Get receiver klass into rdx - also a null check
4155   __ restore_locals();  // restore r14
4156   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
4157   __ load_klass(rdx, rcx);
4158 
4159   Label no_such_method;
4160 
4161   // Preserve method for throw_AbstractMethodErrorVerbose.
4162   __ mov(rcx, rbx);
4163   // Receiver subtype check against REFC.
4164   // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
4165   __ lookup_interface_method(// inputs: rec. class, interface, itable index
4166                              rdx, rax, noreg,
4167                              // outputs: scan temp. reg, scan temp. reg
4168                              rbcp, rlocals,
4169                              no_such_interface,
4170                              /*return_method=*/false);
4171 
4172   // profile this call
4173   __ restore_bcp(); // rbcp was destroyed by receiver type check
4174   __ profile_virtual_call(rdx, rbcp, rlocals);
4175 
4176   // Get declaring interface class from method, and itable index
4177   __ load_method_holder(rax, rbx);
4178   __ movl(rbx, Address(rbx, Method::itable_index_offset()));
4179   __ subl(rbx, Method::itable_index_max);
4180   __ negl(rbx);
4181 
4182   // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
4183   __ mov(rlocals, rdx);
4184   __ lookup_interface_method(// inputs: rec. class, interface, itable index
4185                              rlocals, rax, rbx,
4186                              // outputs: method, scan temp. reg
4187                              rbx, rbcp,
4188                              no_such_interface);
4189 
4190   // rbx: Method* to call
4191   // rcx: receiver
4192   // Check for abstract method error
4193   // Note: This should be done more efficiently via a throw_abstract_method_error
4194   //       interpreter entry point and a conditional jump to it in case of a null
4195   //       method.
4196   __ testptr(rbx, rbx);
4197   __ jcc(Assembler::zero, no_such_method);
4198 
4199   __ profile_called_method(rbx, rbcp, rdx);
4200   __ profile_arguments_type(rdx, rbx, rbcp, true);
4201 
4202   // do the call
4203   // rcx: receiver
4204   // rbx,: Method*
4205   __ jump_from_interpreted(rbx, rdx);
4206   __ should_not_reach_here();
4207 
4208   // exception handling code follows...
4209   // note: must restore interpreter registers to canonical
4210   //       state for exception handling to work correctly!
4211 
4212   __ bind(no_such_method);
4213   // throw exception
4214   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
4215   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
4216   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
4217   // Pass arguments for generating a verbose error message.
4218 #ifdef _LP64
4219   recvKlass = c_rarg1;
4220   Register method    = c_rarg2;
4221   if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
4222   if (method != rcx)    { __ movq(method, rcx);    }
4223 #else
4224   recvKlass = rdx;
4225   Register method    = rcx;
4226 #endif
4227   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
4228              recvKlass, method);
4229   // The call_VM checks for exception, so we should never return here.
4230   __ should_not_reach_here();
4231 
4232   __ bind(no_such_interface);
4233   // throw exception
4234   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
4235   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
4236   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
4237   // Pass arguments for generating a verbose error message.
4238   LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
4239   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
4240              recvKlass, rax);
4241   // the call_VM checks for exception, so we should never return here.
4242   __ should_not_reach_here();
4243 }
4244 
4245 void TemplateTable::invokehandle(int byte_no) {
4246   transition(vtos, vtos);
4247   assert(byte_no == f1_byte, "use this argument");
4248   const Register rbx_method = rbx;
4249   const Register rax_mtype  = rax;
4250   const Register rcx_recv   = rcx;
4251   const Register rdx_flags  = rdx;
4252 
4253   prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
4254   __ verify_method_ptr(rbx_method);
4255   __ verify_oop(rcx_recv);
4256   __ null_check(rcx_recv);
4257 
4258   // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
4259   // rbx: MH.invokeExact_MT method (from f2)
4260 
4261   // Note:  rax_mtype is already pushed (if necessary) by prepare_invoke
4262 
4263   // FIXME: profile the LambdaForm also
4264   __ profile_final_call(rax);
4265   __ profile_arguments_type(rdx, rbx_method, rbcp, true);
4266 
4267   __ jump_from_interpreted(rbx_method, rdx);
4268 }
4269 
4270 void TemplateTable::invokedynamic(int byte_no) {
4271   transition(vtos, vtos);
4272   assert(byte_no == f1_byte, "use this argument");
4273 
4274   const Register rbx_method   = rbx;
4275   const Register rax_callsite = rax;
4276 
4277   prepare_invoke(byte_no, rbx_method, rax_callsite);
4278 
4279   // rax: CallSite object (from cpool->resolved_references[f1])
4280   // rbx: MH.linkToCallSite method (from f2)
4281 
4282   // Note:  rax_callsite is already pushed by prepare_invoke
4283 
4284   // %%% should make a type profile for any invokedynamic that takes a ref argument
4285   // profile this call
4286   __ profile_call(rbcp);
4287   __ profile_arguments_type(rdx, rbx_method, rbcp, false);
4288 
4289   __ verify_oop(rax_callsite);
4290 
4291   __ jump_from_interpreted(rbx_method, rdx);
4292 }
4293 
4294 //-----------------------------------------------------------------------------
4295 // Allocation
4296 
4297 void TemplateTable::_new() {
4298   transition(vtos, atos);
4299   __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
4300   Label slow_case;
4301   Label slow_case_no_pop;
4302   Label done;
4303   Label initialize_header;
4304   Label initialize_object;  // including clearing the fields
4305 
4306   __ get_cpool_and_tags(rcx, rax);
4307 
4308   // Make sure the class we're about to instantiate has been resolved.
4309   // This is done before loading InstanceKlass to be consistent with the order
4310   // how Constant Pool is updated (see ConstantPool::klass_at_put)
4311   const int tags_offset = Array<u1>::base_offset_in_bytes();
4312   __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
4313   __ jcc(Assembler::notEqual, slow_case_no_pop);
4314 
4315   // get InstanceKlass
4316   __ load_resolved_klass_at_index(rcx, rcx, rdx);
4317   __ push(rcx);  // save the klass for initializing the object header later
4318 
4320   // make sure klass is fully initialized (the finalizer check follows below)
4321   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
4322   __ jcc(Assembler::notEqual, slow_case);
4323 
4324   // get instance_size in InstanceKlass (scaled to a count of bytes)
4325   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
4326   // test to see if it has a finalizer or is malformed in some way
4327   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
4328   __ jcc(Assembler::notZero, slow_case);
4329 
4330   // Allocate the instance:
4331   //  If TLAB is enabled:
4332   //    Try to allocate in the TLAB.
4333   //    If fails, go to the slow path.
4334   //  Else If inline contiguous allocations are enabled:
4335   //    Try to allocate in eden.
4336   //    If fails due to heap end, go to slow path.
4337   //
4338   //  If TLAB is enabled OR inline contiguous is enabled:
4339   //    Initialize the allocation.
4340   //    Exit.
4341   //
4342   //  Go to slow path.
4343 
4344   const bool allow_shared_alloc =
4345     Universe::heap()->supports_inline_contig_alloc();
4346 
4347   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
4348 #ifndef _LP64
4349   if (UseTLAB || allow_shared_alloc) {
4350     __ get_thread(thread);
4351   }
4352 #endif // _LP64
4353 
4354   if (UseTLAB) {
4355     __ tlab_allocate(thread, rax, rdx, 0, rcx, rbx, slow_case);
4356     if (ZeroTLAB) {
4357       // the fields have been already cleared
4358       __ jmp(initialize_header);
4359     } else {
4360       // initialize both the header and fields
4361       __ jmp(initialize_object);
4362     }
4363   } else {
4364     // Allocation in the shared Eden, if allowed.
4365     //
4366     // rdx: instance size in bytes
4367     __ eden_allocate(thread, rax, rdx, 0, rbx, slow_case);
4368   }
4369 
4370   // If UseTLAB or allow_shared_alloc is true, the object was created above and
4371   // still needs to be initialized. Otherwise, skip to the slow path.
4372   if (UseTLAB || allow_shared_alloc) {
4373     // The object is initialized before the header.  If the object size is
4374     // zero, go directly to the header initialization.
4375     __ bind(initialize_object);
4376     __ decrement(rdx, sizeof(oopDesc));
4377     __ jcc(Assembler::zero, initialize_header);
4378 
4379     // Initialize topmost object field, divide rdx by 8, check if odd and
4380     // test if zero.
4381     __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
4382     __ shrl(rdx, LogBytesPerLong); // divide by BytesPerLong (8) and set carry flag if odd
4383 
4385 #ifdef ASSERT
4386     // make sure rdx was a multiple of 8
4387     Label L;
4388     // Ignore partial flag stall after shrl() since it is debug VM
4389     __ jcc(Assembler::carryClear, L);
4390     __ stop("object size is not multiple of 2 - adjust this code");
4391     __ bind(L);
4392     // rdx must be > 0, no extra check needed here
4393 #endif
4394 
4395     // initialize remaining object fields: rdx was a multiple of 8
4396     { Label loop;
4397     __ bind(loop);
4398     __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
4399     NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
4400     __ decrement(rdx);
4401     __ jcc(Assembler::notZero, loop);
4402     }
4403 
4404     // initialize object header only.
4405     __ bind(initialize_header);
4406     if (UseBiasedLocking) {
4407       __ pop(rcx);   // get saved klass back in the register.
4408       __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
4409       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
4410     } else {
4411       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
4412                 (intptr_t)markOopDesc::prototype()); // header
4413       __ pop(rcx);   // get saved klass back in the register.
4414     }
4415 #ifdef _LP64
4416     __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
4417     __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
4418 #endif
4419     __ store_klass(rax, rcx);  // klass
4420 
4421     {
4422       SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
4423       // Trigger dtrace event for fastpath
4424       __ push(atos);
4425       __ call_VM_leaf(
4426            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
4427       __ pop(atos);
4428     }
4429 
4430     __ jmp(done);
4431   }
4432 
4433   // slow case
4434   __ bind(slow_case);
4435   __ pop(rcx);   // restore stack pointer to what it was when we came in.
4436   __ bind(slow_case_no_pop);
4437 
4438   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4439   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4440 
4441   __ get_constant_pool(rarg1);
4442   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4443   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
4444   __ verify_oop(rax);
4445 
4446   // continue
4447   __ bind(done);
4448 }
4449 
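     // defaultvalue (Valhalla): push the default (all-fields-default) instance of
     // the named value class; allocation and initialization are delegated to the
     // runtime.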
4450 void TemplateTable::defaultvalue() {
4451   transition(vtos, atos);
4452 
4453   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4454   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4455 
4456   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4457   __ get_constant_pool(rarg1);
4458 
4459   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::defaultvalue),
4460       rarg1, rarg2);
4461   __ verify_oop(rax);
4462 }
4463 
4464 void TemplateTable::newarray() {
4465   transition(itos, atos);
4466   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4467   __ load_unsigned_byte(rarg1, at_bcp(1));
4468   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
4469           rarg1, rax);
4470 }
4471 
4472 void TemplateTable::anewarray() {
4473   transition(itos, atos);
4474 
4475   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4476   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4477 
4478   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4479   __ get_constant_pool(rarg1);
4480   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
4481           rarg1, rarg2, rax);
4482 }
4483 
4484 void TemplateTable::arraylength() {
4485   transition(atos, itos);
4486   __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
4487   __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
4488 }
4489 
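     // checkcast: null passes the check unless (under Valhalla) the constant pool
     // entry is a Q-descriptor, in which case null fails with a
     // NullPointerException (see the is_null path below).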
4490 void TemplateTable::checkcast() {
4491   transition(atos, atos);
4492   Label done, is_null, ok_is_subtype, quicked, resolved;
4493   __ testptr(rax, rax); // object is in rax
4494   __ jcc(Assembler::zero, is_null);
4495 
4496   // Get cpool & tags index
4497   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4498   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4499   // See if bytecode has already been quicked
4500   __ movzbl(rdx, Address(rdx, rbx,
4501       Address::times_1,
4502       Array<u1>::base_offset_in_bytes()));
4503   __ andl (rdx, ~JVM_CONSTANT_QDescBit);
4504   __ cmpl(rdx, JVM_CONSTANT_Class);
4505   __ jcc(Assembler::equal, quicked);
4506   __ push(atos); // save receiver for result, and for GC
4507   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4508 
4509   // vm_result_2 has metadata result
4510 #ifndef _LP64
4511   // borrow rdi from locals
4512   __ get_thread(rdi);
4513   __ get_vm_result_2(rax, rdi);
4514   __ restore_locals();
4515 #else
4516   __ get_vm_result_2(rax, r15_thread);
4517 #endif
4518 
4519   __ pop_ptr(rdx); // restore receiver
4520   __ jmpb(resolved);
4521 
4522   // Get superklass in rax and subklass in rbx
4523   __ bind(quicked);
4524   __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
4525   __ load_resolved_klass_at_index(rax, rcx, rbx);
4526 
4527   __ bind(resolved);
4528   __ load_klass(rbx, rdx);
4529 
4530   // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
4531   // Superklass in rax.  Subklass in rbx.
4532   __ gen_subtype_check(rbx, ok_is_subtype);
4533 
4534   // Come here on failure
4535   __ push_ptr(rdx);
4536   // object is at TOS
4537   __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
4538 
4539   // Come here on success
4540   __ bind(ok_is_subtype);
4541   __ mov(rax, rdx); // Restore object in rdx
4542   __ jmp(done);
4543 
4544   __ bind(is_null);
4545 
4546   // Collect counts on whether this check-cast sees NULLs a lot or not.
4547   if (ProfileInterpreter) {
4548     __ profile_null_seen(rcx);
4549   }
4550 
4551   if (EnableValhalla) {
4552     // Get cpool & tags index
4553     __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4554     __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4555     // See if CP entry is a Q-descriptor
4556     __ movzbl(rcx, Address(rdx, rbx,
4557         Address::times_1,
4558         Array<u1>::base_offset_in_bytes()));
4559     __ andl (rcx, JVM_CONSTANT_QDescBit);
4560     __ cmpl(rcx, JVM_CONSTANT_QDescBit);
4561     __ jcc(Assembler::notEqual, done);
4562     __ jump(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
4563   }
4564 
4565   __ bind(done);
4566 }
4567 
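     // instanceof uses the same quickening scheme as checkcast above; a null
     // receiver simply yields 0, with no Q-descriptor special case.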
4568 void TemplateTable::instanceof() {
4569   transition(atos, itos);
4570   Label done, is_null, ok_is_subtype, quicked, resolved;
4571   __ testptr(rax, rax);
4572   __ jcc(Assembler::zero, is_null);
4573 
4574   // Get cpool & tags index
4575   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4576   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4577   // See if bytecode has already been quicked
4578   __ movzbl(rdx, Address(rdx, rbx,
4579         Address::times_1,
4580         Array<u1>::base_offset_in_bytes()));
4581   __ andl (rdx, ~JVM_CONSTANT_QDescBit);
4582   __ cmpl(rdx, JVM_CONSTANT_Class);
4583   __ jcc(Assembler::equal, quicked);

  __ push(atos); // save receiver for result, and for GC
  call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
  // vm_result_2 has metadata result

#ifndef _LP64
  // borrow rdi from locals
  __ get_thread(rdi);
  __ get_vm_result_2(rax, rdi);
  __ restore_locals();
#else
  __ get_vm_result_2(rax, r15_thread);
#endif

  __ pop_ptr(rdx); // restore receiver
  __ verify_oop(rdx);
  __ load_klass(rdx, rdx);
  __ jmpb(resolved);

  // Get superklass in rax and subklass in rdx
  __ bind(quicked);
  __ load_klass(rdx, rax);
  __ load_resolved_klass_at_index(rax, rcx, rbx);

  __ bind(resolved);

  // Generate subtype check.  Blows rcx, rdi
  // Superklass in rax.  Subklass in rdx.
  __ gen_subtype_check(rdx, ok_is_subtype);

  // Come here on failure
  __ xorl(rax, rax);
  __ jmpb(done);
  // Come here on success
  __ bind(ok_is_subtype);
  __ movl(rax, 1);

  // Collect counts on whether this test sees NULLs a lot or not.
  if (ProfileInterpreter) {
    __ jmp(done);
    __ bind(is_null);
    __ profile_null_seen(rcx);
  } else {
    __ bind(is_null);   // same as 'done'
  }
  __ bind(done);
  // rax = 0: obj == NULL or  obj is not an instance of the specified klass
  // rax = 1: obj != NULL and obj is     an instance of the specified klass
}

//----------------------------------------------------------------------------------------------------
// Breakpoints
void TemplateTable::_breakpoint() {
  // Note: We get here even if we are single stepping.
  // jbug insists on setting breakpoints at every bytecode
  // even if we are in single step mode.

  transition(vtos, vtos);

  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);

  // get the unpatched byte code
  __ get_method(rarg);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address,
                              InterpreterRuntime::get_original_bytecode_at),
             rarg, rbcp);
  __ mov(rbx, rax);  // why?
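  // (presumably because dispatch_only_normal below dispatches on the bytecode
  //  in rbx, and rbx, being callee-saved, survives the call_VM that follows)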

  // post the breakpoint event
  __ get_method(rarg);
  __ call_VM(noreg,
             CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
             rarg, rbcp);

  // complete the execution of original bytecode
  __ dispatch_only_normal(vtos);
}

//-----------------------------------------------------------------------------
// Exceptions

void TemplateTable::athrow() {
  transition(atos, vtos);
  __ null_check(rax);
  __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
}

//-----------------------------------------------------------------------------
// Synchronization
//
// Note: monitorenter & exit are symmetric routines, which is reflected
//       in the assembly code structure as well
//
// Stack layout:
//
// [expressions  ] <--- rsp               = expression stack top
// ..
// [expressions  ]
// [monitor entry] <--- monitor block top = expression stack bot
// ..
// [monitor entry]
// [frame data   ] <--- monitor block bot
// ...
// [saved rbp    ] <--- rbp
void TemplateTable::monitorenter() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  __ resolve(IS_NOT_NULL, rax);

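  // Valhalla: value instances have no identity and cannot be locked. Their
  // mark word carries the always-locked pattern, so a match below means the
  // receiver is a value instance and monitorenter must fail.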
  const int is_value_mask = markOopDesc::always_locked_pattern;
  Label has_identity;
  __ movptr(rbx, Address(rax, oopDesc::mark_offset_in_bytes()));
  __ andptr(rbx, is_value_mask);
  __ cmpl(rbx, is_value_mask);
  __ jcc(Assembler::notEqual, has_identity);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();
  __ bind(has_identity);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Label allocated;

  Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
  Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
  Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);

  // initialize entry pointer
  __ xorl(rmon, rmon); // points to free slot or NULL

  // find a free slot in the monitor block (result in rmon)
  {
    Label entry, loop, exit;
    __ movptr(rtop, monitor_block_top); // points to current entry,
                                        // starting with top-most entry
    __ lea(rbot, monitor_block_bot);    // points to word before bottom
                                        // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is used
    __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
    // if not used then remember entry in rmon
    __ cmovptr(Assembler::equal, rmon, rtop);   // cmov => cmovptr
    // check if current entry is for same object
    __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jccb(Assembler::equal, exit);
    // otherwise advance to next entry
    __ addptr(rtop, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(rtop, rbot);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
    __ bind(exit);
  }

  __ testptr(rmon, rmon); // check if a slot has been found
  __ jcc(Assembler::notZero, allocated); // if found, continue with that one

  // allocate one if there's no free slot
  {
    Label entry, loop;
    // 1. compute new pointers          // rsp: old expression stack top
    __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
    __ subptr(rsp, entry_size);         // move expression stack top
    __ subptr(rmon, entry_size);        // move expression stack bottom
    __ mov(rtop, rsp);                  // set start value for copy loop
    __ movptr(monitor_block_bot, rmon); // set new monitor block bottom
    __ jmp(entry);
    // 2. move expression stack contents
    __ bind(loop);
    __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
                                                // word from old location
    __ movptr(Address(rtop, 0), rbot);          // and store it at new location
    __ addptr(rtop, wordSize);                  // advance to next word
    __ bind(entry);
    __ cmpptr(rtop, rmon);                      // check if bottom reached
    __ jcc(Assembler::notEqual, loop);          // if not at bottom then
                                                // copy next word
  }
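  // After the copy, the expression stack has slid down by entry_size and the
  // freshly freed slot sits at the new monitor block bottom, which is exactly
  // where rmon already points.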

  // call run-time routine
  // rmon: points to monitor entry
  __ bind(allocated);

  // Increment bcp to point to the next bytecode, so exception
  // handling for asynchronous exceptions works correctly.
  // The object has already been popped from the stack, so the
  // expression stack looks correct.
  __ increment(rbcp);

  // store object
  __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
  __ lock_object(rmon);

  // check to make sure this monitor doesn't cause stack overflow after locking
  __ save_bcp();  // in case of exception
  __ generate_stack_overflow_check(0);

  // The bcp has already been incremented. Just need to dispatch to
  // next instruction.
  __ dispatch_next(vtos);
}

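// A synchronized block compiles to a paired enter/exit, roughly (an informal
// bytecode-level sketch; javac also emits an exception-table entry so the
// monitor is released on the exceptional path):
//
//   aload obj; monitorenter; ...body...; aload obj; monitorexit;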
void TemplateTable::monitorexit() {
  transition(atos, vtos);

  // check for NULL object
  __ null_check(rax);

  __ resolve(IS_NOT_NULL, rax);

  const int is_value_mask = markOopDesc::always_locked_pattern;
  Label has_identity;
  __ movptr(rbx, Address(rax, oopDesc::mark_offset_in_bytes()));
  __ andptr(rbx, is_value_mask);
  __ cmpl(rbx, is_value_mask);
  __ jcc(Assembler::notEqual, has_identity);
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                     InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();
  __ bind(has_identity);

  const Address monitor_block_top(
        rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
  const Address monitor_block_bot(
        rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
  const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;

  Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
  Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);

  Label found;

  // find matching slot
  {
    Label entry, loop;
    __ movptr(rtop, monitor_block_top); // points to current entry,
                                        // starting with top-most entry
    __ lea(rbot, monitor_block_bot);    // points to word before bottom
                                        // of monitor block
    __ jmpb(entry);

    __ bind(loop);
    // check if current entry is for same object
    __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
    // if same object then stop searching
    __ jcc(Assembler::equal, found);
    // otherwise advance to next entry
    __ addptr(rtop, entry_size);
    __ bind(entry);
    // check if bottom reached
    __ cmpptr(rtop, rbot);
    // if not at bottom then check this entry
    __ jcc(Assembler::notEqual, loop);
  }

  // Error handling: unlocking was not block-structured
  __ call_VM(noreg, CAST_FROM_FN_PTR(address,
                   InterpreterRuntime::throw_illegal_monitor_state_exception));
  __ should_not_reach_here();

  // call run-time routine
  __ bind(found);
  __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
  __ unlock_object(rtop);
  __ pop_ptr(rax); // discard object
}

// Wide instructions
void TemplateTable::wide() {
  transition(vtos, vtos);
  __ load_unsigned_byte(rbx, at_bcp(1));
  ExternalAddress wtable((address)Interpreter::_wentry_point);
  __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
  // Note: the rbcp increment step is part of the individual wide bytecode implementations
}
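// Example: in "wide iinc" the byte at bcp+1 is the iinc opcode itself; the
// wide table entry then reads a 2-byte local index and a 2-byte increment
// where the narrow form uses single bytes.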

// Multi arrays
void TemplateTable::multianewarray() {
  transition(vtos, atos);

  Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
  __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
  // last dim is on top of stack; we want address of first one:
  // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordSize
  // subtracting the final wordSize points at the beginning of that element.
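  // Worked example (assuming stackElementSize == wordSize, as on LP64): for
  // ndims == 3 the three counts occupy the top three slots, and rarg ends up
  // at rsp + 3*wordSize - wordSize, i.e. the slot of the first dimension.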
  __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
  call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
  __ load_unsigned_byte(rbx, at_bcp(3));
  __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
}