1 /*
   2  * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "compiler/disassembler.hpp"
  28 #include "interpreter/interpreter.hpp"
  29 #include "interpreter/interpreterRuntime.hpp"
  30 #include "interpreter/interp_masm.hpp"
  31 #include "interpreter/templateTable.hpp"
  32 #include "memory/universe.hpp"
  33 #include "oops/methodData.hpp"
  34 #include "oops/objArrayKlass.hpp"
  35 #include "oops/oop.inline.hpp"
  36 #include "prims/methodHandles.hpp"
  37 #include "runtime/frame.inline.hpp"
  38 #include "runtime/safepointMechanism.hpp"
  39 #include "runtime/sharedRuntime.hpp"
  40 #include "runtime/stubRoutines.hpp"
  41 #include "runtime/synchronizer.hpp"
  42 #include "utilities/macros.hpp"
  43 
  44 #define __ Disassembler::hook<InterpreterMacroAssembler>(__FILE__, __LINE__, _masm)->
  45 
  46 // Global Register Names
  47 static const Register rbcp     = LP64_ONLY(r13) NOT_LP64(rsi);
  48 static const Register rlocals  = LP64_ONLY(r14) NOT_LP64(rdi);
  49 
  50 // Platform-dependent initialization
  51 void TemplateTable::pd_initialize() {
  52   // No x86 specific initialization
  53 }
  54 
  55 // Address Computation: local variables
  56 static inline Address iaddress(int n) {
  57   return Address(rlocals, Interpreter::local_offset_in_bytes(n));
  58 }
  59 
  60 static inline Address laddress(int n) {
  61   return iaddress(n + 1);
  62 }
  63 
  64 #ifndef _LP64
  65 static inline Address haddress(int n) {
  66   return iaddress(n + 0);
  67 }
  68 #endif
  69 
  70 static inline Address faddress(int n) {
  71   return iaddress(n);
  72 }
  73 
  74 static inline Address daddress(int n) {
  75   return laddress(n);
  76 }
  77 
  78 static inline Address aaddress(int n) {
  79   return iaddress(n);
  80 }
  81 
  82 static inline Address iaddress(Register r) {
  83   return Address(rlocals, r, Address::times_ptr);
  84 }
  85 
  86 static inline Address laddress(Register r) {
  87   return Address(rlocals, r, Address::times_ptr, Interpreter::local_offset_in_bytes(1));
  88 }
  89 
  90 #ifndef _LP64
  91 static inline Address haddress(Register r)       {
  92   return Address(rlocals, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
  93 }
  94 #endif
  95 
  96 static inline Address faddress(Register r) {
  97   return iaddress(r);
  98 }
  99 
 100 static inline Address daddress(Register r) {
 101   return laddress(r);
 102 }
 103 
 104 static inline Address aaddress(Register r) {
 105   return iaddress(r);
 106 }
 107 
 108 
 109 // expression stack
 110 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
 111 // data beyond the rsp which is potentially unsafe in an MT environment;
 112 // an interrupt may overwrite that data.)
 113 static inline Address at_rsp   () {
 114   return Address(rsp, 0);
 115 }
 116 
 117 // At top of Java expression stack which may be different than esp().  It
 118 // isn't for category 1 objects.
 119 static inline Address at_tos   () {
 120   return Address(rsp,  Interpreter::expr_offset_in_bytes(0));
 121 }
 122 
 123 static inline Address at_tos_p1() {
 124   return Address(rsp,  Interpreter::expr_offset_in_bytes(1));
 125 }
 126 
 127 static inline Address at_tos_p2() {
 128   return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
 129 }
 130 
 131 // Condition conversion
 132 static Assembler::Condition j_not(TemplateTable::Condition cc) {
 133   switch (cc) {
 134   case TemplateTable::equal        : return Assembler::notEqual;
 135   case TemplateTable::not_equal    : return Assembler::equal;
 136   case TemplateTable::less         : return Assembler::greaterEqual;
 137   case TemplateTable::less_equal   : return Assembler::greater;
 138   case TemplateTable::greater      : return Assembler::lessEqual;
 139   case TemplateTable::greater_equal: return Assembler::less;
 140   }
 141   ShouldNotReachHere();
 142   return Assembler::zero;
 143 }
 144 
 145 
 146 
 147 // Miscelaneous helper routines
 148 // Store an oop (or NULL) at the address described by obj.
 149 // If val == noreg this means store a NULL
 150 
 151 
 152 static void do_oop_store(InterpreterMacroAssembler* _masm,
 153                          Address dst,
 154                          Register val,
 155                          DecoratorSet decorators = 0) {
 156   assert(val == noreg || val == rax, "parameter is just for looks");
 157   __ store_heap_oop(dst, val, rdx, rbx, decorators);
 158 }
 159 
 160 static void do_oop_load(InterpreterMacroAssembler* _masm,
 161                         Address src,
 162                         Register dst,
 163                         DecoratorSet decorators = 0) {
 164   __ load_heap_oop(dst, src, rdx, rbx, decorators);
 165 }
 166 
 167 Address TemplateTable::at_bcp(int offset) {
 168   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 169   return Address(rbcp, offset);
 170 }
 171 
 172 
 173 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 174                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 175                                    int byte_no) {
 176   if (!RewriteBytecodes)  return;
 177   Label L_patch_done;
 178 
 179   switch (bc) {
 180   case Bytecodes::_fast_qputfield:
 181   case Bytecodes::_fast_aputfield:
 182   case Bytecodes::_fast_bputfield:
 183   case Bytecodes::_fast_zputfield:
 184   case Bytecodes::_fast_cputfield:
 185   case Bytecodes::_fast_dputfield:
 186   case Bytecodes::_fast_fputfield:
 187   case Bytecodes::_fast_iputfield:
 188   case Bytecodes::_fast_lputfield:
 189   case Bytecodes::_fast_sputfield:
 190     {
 191       // We skip bytecode quickening for putfield instructions when
 192       // the put_code written to the constant pool cache is zero.
 193       // This is required so that every execution of this instruction
 194       // calls out to InterpreterRuntime::resolve_get_put to do
 195       // additional, required work.
 196       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 197       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 198       __ get_cache_and_index_and_bytecode_at_bcp(temp_reg, bc_reg, temp_reg, byte_no, 1);
 199       __ movl(bc_reg, bc);
 200       __ cmpl(temp_reg, (int) 0);
 201       __ jcc(Assembler::zero, L_patch_done);  // don't patch
 202     }
 203     break;
 204   default:
 205     assert(byte_no == -1, "sanity");
 206     // the pair bytecodes have already done the load.
 207     if (load_bc_into_bc_reg) {
 208       __ movl(bc_reg, bc);
 209     }
 210   }
 211 
 212   if (JvmtiExport::can_post_breakpoint()) {
 213     Label L_fast_patch;
 214     // if a breakpoint is present we can't rewrite the stream directly
 215     __ movzbl(temp_reg, at_bcp(0));
 216     __ cmpl(temp_reg, Bytecodes::_breakpoint);
 217     __ jcc(Assembler::notEqual, L_fast_patch);
 218     __ get_method(temp_reg);
 219     // Let breakpoint table handling rewrite to quicker bytecode
 220     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rbcp, bc_reg);
 221 #ifndef ASSERT
 222     __ jmpb(L_patch_done);
 223 #else
 224     __ jmp(L_patch_done);
 225 #endif
 226     __ bind(L_fast_patch);
 227   }
 228 
 229 #ifdef ASSERT
 230   Label L_okay;
 231   __ load_unsigned_byte(temp_reg, at_bcp(0));
 232   __ cmpl(temp_reg, (int) Bytecodes::java_code(bc));
 233   __ jcc(Assembler::equal, L_okay);
 234   __ cmpl(temp_reg, bc_reg);
 235   __ jcc(Assembler::equal, L_okay);
 236   __ stop("patching the wrong bytecode");
 237   __ bind(L_okay);
 238 #endif
 239 
 240   // patch bytecode
 241   __ movb(at_bcp(0), bc_reg);
 242   __ bind(L_patch_done);
 243 }
 244 // Individual instructions
 245 
 246 
 247 void TemplateTable::nop() {
 248   transition(vtos, vtos);
 249   // nothing to do
 250 }
 251 
 252 void TemplateTable::shouldnotreachhere() {
 253   transition(vtos, vtos);
 254   __ stop("shouldnotreachhere bytecode");
 255 }
 256 
 257 void TemplateTable::aconst_null() {
 258   transition(vtos, atos);
 259   __ xorl(rax, rax);
 260 }
 261 
 262 void TemplateTable::iconst(int value) {
 263   transition(vtos, itos);
 264   if (value == 0) {
 265     __ xorl(rax, rax);
 266   } else {
 267     __ movl(rax, value);
 268   }
 269 }
 270 
 271 void TemplateTable::lconst(int value) {
 272   transition(vtos, ltos);
 273   if (value == 0) {
 274     __ xorl(rax, rax);
 275   } else {
 276     __ movl(rax, value);
 277   }
 278 #ifndef _LP64
 279   assert(value >= 0, "check this code");
 280   __ xorptr(rdx, rdx);
 281 #endif
 282 }
 283 
 284 
 285 
 286 void TemplateTable::fconst(int value) {
 287   transition(vtos, ftos);
 288   if (UseSSE >= 1) {
 289     static float one = 1.0f, two = 2.0f;
 290     switch (value) {
 291     case 0:
 292       __ xorps(xmm0, xmm0);
 293       break;
 294     case 1:
 295       __ movflt(xmm0, ExternalAddress((address) &one));
 296       break;
 297     case 2:
 298       __ movflt(xmm0, ExternalAddress((address) &two));
 299       break;
 300     default:
 301       ShouldNotReachHere();
 302       break;
 303     }
 304   } else {
 305 #ifdef _LP64
 306     ShouldNotReachHere();
 307 #else
 308            if (value == 0) { __ fldz();
 309     } else if (value == 1) { __ fld1();
 310     } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
 311     } else                 { ShouldNotReachHere();
 312     }
 313 #endif // _LP64
 314   }
 315 }
 316 
 317 void TemplateTable::dconst(int value) {
 318   transition(vtos, dtos);
 319   if (UseSSE >= 2) {
 320     static double one = 1.0;
 321     switch (value) {
 322     case 0:
 323       __ xorpd(xmm0, xmm0);
 324       break;
 325     case 1:
 326       __ movdbl(xmm0, ExternalAddress((address) &one));
 327       break;
 328     default:
 329       ShouldNotReachHere();
 330       break;
 331     }
 332   } else {
 333 #ifdef _LP64
 334     ShouldNotReachHere();
 335 #else
 336            if (value == 0) { __ fldz();
 337     } else if (value == 1) { __ fld1();
 338     } else                 { ShouldNotReachHere();
 339     }
 340 #endif
 341   }
 342 }
 343 
 344 void TemplateTable::bipush() {
 345   transition(vtos, itos);
 346   __ load_signed_byte(rax, at_bcp(1));
 347 }
 348 
 349 void TemplateTable::sipush() {
 350   transition(vtos, itos);
 351   __ load_unsigned_short(rax, at_bcp(1));
 352   __ bswapl(rax);
 353   __ sarl(rax, 16);
 354 }
 355 
 356 void TemplateTable::ldc(bool wide) {
 357   transition(vtos, vtos);
 358   Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 359   Label call_ldc, notFloat, notClass, notInt, Done;
 360 
 361   if (wide) {
 362     __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 363   } else {
 364     __ load_unsigned_byte(rbx, at_bcp(1));
 365   }
 366 
 367   __ get_cpool_and_tags(rcx, rax);
 368   const int base_offset = ConstantPool::header_size() * wordSize;
 369   const int tags_offset = Array<u1>::base_offset_in_bytes();
 370 
 371   // get type
 372   __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
 373 
 374   // unresolved class - get the resolved class
 375   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
 376   __ jccb(Assembler::equal, call_ldc);
 377 
 378   // unresolved class in error state - call into runtime to throw the error
 379   // from the first resolution attempt
 380   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
 381   __ jccb(Assembler::equal, call_ldc);
 382 
 383   // resolved class - need to call vm to get java mirror of the class
 384   __ cmpl(rdx, JVM_CONSTANT_Class);
 385   __ jcc(Assembler::notEqual, notClass);
 386 
 387   __ bind(call_ldc);
 388 
 389   __ movl(rarg, wide);
 390   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rarg);
 391 
 392   __ push(atos);
 393   __ jmp(Done);
 394 
 395   __ bind(notClass);
 396   __ cmpl(rdx, JVM_CONSTANT_Float);
 397   __ jccb(Assembler::notEqual, notFloat);
 398 
 399   // ftos
 400   __ load_float(Address(rcx, rbx, Address::times_ptr, base_offset));
 401   __ push(ftos);
 402   __ jmp(Done);
 403 
 404   __ bind(notFloat);
 405   __ cmpl(rdx, JVM_CONSTANT_Integer);
 406   __ jccb(Assembler::notEqual, notInt);
 407 
 408   // itos
 409   __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
 410   __ push(itos);
 411   __ jmp(Done);
 412 
 413   // assume the tag is for condy; if not, the VM runtime will tell us
 414   __ bind(notInt);
 415   condy_helper(Done);
 416 
 417   __ bind(Done);
 418 }
 419 
 420 // Fast path for caching oop constants.
 421 void TemplateTable::fast_aldc(bool wide) {
 422   transition(vtos, atos);
 423 
 424   Register result = rax;
 425   Register tmp = rdx;
 426   Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 427   int index_size = wide ? sizeof(u2) : sizeof(u1);
 428 
 429   Label resolved;
 430 
 431   // We are resolved if the resolved reference cache entry contains a
 432   // non-null object (String, MethodType, etc.)
 433   assert_different_registers(result, tmp);
 434   __ get_cache_index_at_bcp(tmp, 1, index_size);
 435   __ load_resolved_reference_at_index(result, tmp);
 436   __ testptr(result, result);
 437   __ jcc(Assembler::notZero, resolved);
 438 
 439   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 440 
 441   // first time invocation - must resolve first
 442   __ movl(rarg, (int)bytecode());
 443   __ call_VM(result, entry, rarg);
 444   __ bind(resolved);
 445 
 446   { // Check for the null sentinel.
 447     // If we just called the VM, it already did the mapping for us,
 448     // but it's harmless to retry.
 449     Label notNull;
 450     ExternalAddress null_sentinel((address)Universe::the_null_sentinel_addr());
 451     __ movptr(tmp, null_sentinel);
 452     __ cmpptr(tmp, result);
 453     __ jccb(Assembler::notEqual, notNull);
 454     __ xorptr(result, result);  // NULL object reference
 455     __ bind(notNull);
 456   }
 457 
 458   if (VerifyOops) {
 459     __ verify_oop(result);
 460   }
 461 }
 462 
 463 void TemplateTable::ldc2_w() {
 464   transition(vtos, vtos);
 465   Label notDouble, notLong, Done;
 466   __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 467 
 468   __ get_cpool_and_tags(rcx, rax);
 469   const int base_offset = ConstantPool::header_size() * wordSize;
 470   const int tags_offset = Array<u1>::base_offset_in_bytes();
 471 
 472   // get type
 473   __ movzbl(rdx, Address(rax, rbx, Address::times_1, tags_offset));
 474   __ cmpl(rdx, JVM_CONSTANT_Double);
 475   __ jccb(Assembler::notEqual, notDouble);
 476 
 477   // dtos
 478   __ load_double(Address(rcx, rbx, Address::times_ptr, base_offset));
 479   __ push(dtos);
 480 
 481   __ jmp(Done);
 482   __ bind(notDouble);
 483   __ cmpl(rdx, JVM_CONSTANT_Long);
 484   __ jccb(Assembler::notEqual, notLong);
 485 
 486   // ltos
 487   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
 488   NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
 489   __ push(ltos);
 490   __ jmp(Done);
 491 
 492   __ bind(notLong);
 493   condy_helper(Done);
 494 
 495   __ bind(Done);
 496 }
 497 
 498 void TemplateTable::condy_helper(Label& Done) {
 499   const Register obj = rax;
 500   const Register off = rbx;
 501   const Register flags = rcx;
 502   const Register rarg = NOT_LP64(rcx) LP64_ONLY(c_rarg1);
 503   __ movl(rarg, (int)bytecode());
 504   call_VM(obj, CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc), rarg);
 505 #ifndef _LP64
 506   // borrow rdi from locals
 507   __ get_thread(rdi);
 508   __ get_vm_result_2(flags, rdi);
 509   __ restore_locals();
 510 #else
 511   __ get_vm_result_2(flags, r15_thread);
 512 #endif
 513   // VMr = obj = base address to find primitive value to push
 514   // VMr2 = flags = (tos, off) using format of CPCE::_flags
 515   __ movl(off, flags);
 516   __ andl(off, ConstantPoolCacheEntry::field_index_mask);
 517   const Address field(obj, off, Address::times_1, 0*wordSize);
 518 
 519   // What sort of thing are we loading?
 520   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
 521   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
 522 
 523   switch (bytecode()) {
 524   case Bytecodes::_ldc:
 525   case Bytecodes::_ldc_w:
 526     {
 527       // tos in (itos, ftos, stos, btos, ctos, ztos)
 528       Label notInt, notFloat, notShort, notByte, notChar, notBool;
 529       __ cmpl(flags, itos);
 530       __ jcc(Assembler::notEqual, notInt);
 531       // itos
 532       __ movl(rax, field);
 533       __ push(itos);
 534       __ jmp(Done);
 535 
 536       __ bind(notInt);
 537       __ cmpl(flags, ftos);
 538       __ jcc(Assembler::notEqual, notFloat);
 539       // ftos
 540       __ load_float(field);
 541       __ push(ftos);
 542       __ jmp(Done);
 543 
 544       __ bind(notFloat);
 545       __ cmpl(flags, stos);
 546       __ jcc(Assembler::notEqual, notShort);
 547       // stos
 548       __ load_signed_short(rax, field);
 549       __ push(stos);
 550       __ jmp(Done);
 551 
 552       __ bind(notShort);
 553       __ cmpl(flags, btos);
 554       __ jcc(Assembler::notEqual, notByte);
 555       // btos
 556       __ load_signed_byte(rax, field);
 557       __ push(btos);
 558       __ jmp(Done);
 559 
 560       __ bind(notByte);
 561       __ cmpl(flags, ctos);
 562       __ jcc(Assembler::notEqual, notChar);
 563       // ctos
 564       __ load_unsigned_short(rax, field);
 565       __ push(ctos);
 566       __ jmp(Done);
 567 
 568       __ bind(notChar);
 569       __ cmpl(flags, ztos);
 570       __ jcc(Assembler::notEqual, notBool);
 571       // ztos
 572       __ load_signed_byte(rax, field);
 573       __ push(ztos);
 574       __ jmp(Done);
 575 
 576       __ bind(notBool);
 577       break;
 578     }
 579 
 580   case Bytecodes::_ldc2_w:
 581     {
 582       Label notLong, notDouble;
 583       __ cmpl(flags, ltos);
 584       __ jcc(Assembler::notEqual, notLong);
 585       // ltos
 586       __ movptr(rax, field);
 587       NOT_LP64(__ movptr(rdx, field.plus_disp(4)));
 588       __ push(ltos);
 589       __ jmp(Done);
 590 
 591       __ bind(notLong);
 592       __ cmpl(flags, dtos);
 593       __ jcc(Assembler::notEqual, notDouble);
 594       // dtos
 595       __ load_double(field);
 596       __ push(dtos);
 597       __ jmp(Done);
 598 
 599       __ bind(notDouble);
 600       break;
 601     }
 602 
 603   default:
 604     ShouldNotReachHere();
 605   }
 606 
 607   __ stop("bad ldc/condy");
 608 }
 609 
 610 void TemplateTable::locals_index(Register reg, int offset) {
 611   __ load_unsigned_byte(reg, at_bcp(offset));
 612   __ negptr(reg);
 613 }
 614 
 615 void TemplateTable::iload() {
 616   iload_internal();
 617 }
 618 
 619 void TemplateTable::nofast_iload() {
 620   iload_internal(may_not_rewrite);
 621 }
 622 
 623 void TemplateTable::iload_internal(RewriteControl rc) {
 624   transition(vtos, itos);
 625   if (RewriteFrequentPairs && rc == may_rewrite) {
 626     Label rewrite, done;
 627     const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
 628     LP64_ONLY(assert(rbx != bc, "register damaged"));
 629 
 630     // get next byte
 631     __ load_unsigned_byte(rbx,
 632                           at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 633     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
 634     // last two iloads in a pair.  Comparing against fast_iload means that
 635     // the next bytecode is neither an iload or a caload, and therefore
 636     // an iload pair.
 637     __ cmpl(rbx, Bytecodes::_iload);
 638     __ jcc(Assembler::equal, done);
 639 
 640     __ cmpl(rbx, Bytecodes::_fast_iload);
 641     __ movl(bc, Bytecodes::_fast_iload2);
 642 
 643     __ jccb(Assembler::equal, rewrite);
 644 
 645     // if _caload, rewrite to fast_icaload
 646     __ cmpl(rbx, Bytecodes::_caload);
 647     __ movl(bc, Bytecodes::_fast_icaload);
 648     __ jccb(Assembler::equal, rewrite);
 649 
 650     // rewrite so iload doesn't check again.
 651     __ movl(bc, Bytecodes::_fast_iload);
 652 
 653     // rewrite
 654     // bc: fast bytecode
 655     __ bind(rewrite);
 656     patch_bytecode(Bytecodes::_iload, bc, rbx, false);
 657     __ bind(done);
 658   }
 659 
 660   // Get the local value into tos
 661   locals_index(rbx);
 662   __ movl(rax, iaddress(rbx));
 663 }
 664 
 665 void TemplateTable::fast_iload2() {
 666   transition(vtos, itos);
 667   locals_index(rbx);
 668   __ movl(rax, iaddress(rbx));
 669   __ push(itos);
 670   locals_index(rbx, 3);
 671   __ movl(rax, iaddress(rbx));
 672 }
 673 
 674 void TemplateTable::fast_iload() {
 675   transition(vtos, itos);
 676   locals_index(rbx);
 677   __ movl(rax, iaddress(rbx));
 678 }
 679 
 680 void TemplateTable::lload() {
 681   transition(vtos, ltos);
 682   locals_index(rbx);
 683   __ movptr(rax, laddress(rbx));
 684   NOT_LP64(__ movl(rdx, haddress(rbx)));
 685 }
 686 
 687 void TemplateTable::fload() {
 688   transition(vtos, ftos);
 689   locals_index(rbx);
 690   __ load_float(faddress(rbx));
 691 }
 692 
 693 void TemplateTable::dload() {
 694   transition(vtos, dtos);
 695   locals_index(rbx);
 696   __ load_double(daddress(rbx));
 697 }
 698 
 699 void TemplateTable::aload() {
 700   transition(vtos, atos);
 701   locals_index(rbx);
 702   __ movptr(rax, aaddress(rbx));
 703 }
 704 
 705 void TemplateTable::locals_index_wide(Register reg) {
 706   __ load_unsigned_short(reg, at_bcp(2));
 707   __ bswapl(reg);
 708   __ shrl(reg, 16);
 709   __ negptr(reg);
 710 }
 711 
 712 void TemplateTable::wide_iload() {
 713   transition(vtos, itos);
 714   locals_index_wide(rbx);
 715   __ movl(rax, iaddress(rbx));
 716 }
 717 
 718 void TemplateTable::wide_lload() {
 719   transition(vtos, ltos);
 720   locals_index_wide(rbx);
 721   __ movptr(rax, laddress(rbx));
 722   NOT_LP64(__ movl(rdx, haddress(rbx)));
 723 }
 724 
 725 void TemplateTable::wide_fload() {
 726   transition(vtos, ftos);
 727   locals_index_wide(rbx);
 728   __ load_float(faddress(rbx));
 729 }
 730 
 731 void TemplateTable::wide_dload() {
 732   transition(vtos, dtos);
 733   locals_index_wide(rbx);
 734   __ load_double(daddress(rbx));
 735 }
 736 
 737 void TemplateTable::wide_aload() {
 738   transition(vtos, atos);
 739   locals_index_wide(rbx);
 740   __ movptr(rax, aaddress(rbx));
 741 }
 742 
 743 void TemplateTable::index_check(Register array, Register index) {
 744   // Pop ptr into array
 745   __ pop_ptr(array);
 746   index_check_without_pop(array, index);
 747 }
 748 
 749 void TemplateTable::index_check_without_pop(Register array, Register index) {
 750   // destroys rbx
 751   // check array
 752   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
 753   // sign extend index for use by indexed load
 754   __ movl2ptr(index, index);
 755   // check index
 756   __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
 757   if (index != rbx) {
 758     // ??? convention: move aberrant index into rbx for exception message
 759     assert(rbx != array, "different registers");
 760     __ movl(rbx, index);
 761   }
 762   Label skip;
 763   __ jccb(Assembler::below, skip);
 764   // Pass array to create more detailed exceptions.
 765   __ mov(NOT_LP64(rax) LP64_ONLY(c_rarg1), array);
 766   __ jump(ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
 767   __ bind(skip);
 768 }
 769 
 770 void TemplateTable::iaload() {
 771   transition(itos, itos);
 772   // rax: index
 773   // rdx: array
 774   index_check(rdx, rax); // kills rbx
 775   __ movl(rax, Address(rdx, rax,
 776                        Address::times_4,
 777                        arrayOopDesc::base_offset_in_bytes(T_INT)));
 778 }
 779 
 780 void TemplateTable::laload() {
 781   transition(itos, ltos);
 782   // rax: index
 783   // rdx: array
 784   index_check(rdx, rax); // kills rbx
 785   NOT_LP64(__ mov(rbx, rax));
 786   // rbx,: index
 787   __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
 788   NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
 789 }
 790 
 791 
 792 
 793 void TemplateTable::faload() {
 794   transition(itos, ftos);
 795   // rax: index
 796   // rdx: array
 797   index_check(rdx, rax); // kills rbx
 798   __ load_float(Address(rdx, rax,
 799                         Address::times_4,
 800                         arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
 801 }
 802 
 803 void TemplateTable::daload() {
 804   transition(itos, dtos);
 805   // rax: index
 806   // rdx: array
 807   index_check(rdx, rax); // kills rbx
 808   __ load_double(Address(rdx, rax,
 809                          Address::times_8,
 810                          arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
 811 }
 812 
 813 void TemplateTable::aaload() {
 814   transition(itos, atos);
 815 
 816   Register array = rcx;
 817   Register index = rax;
 818 
 819   index_check(array, index); // kills rbx
 820   if (ValueArrayFlatten) {
 821     Label is_flat_array, done;
 822     __ test_flat_array_oop(array, rbx, is_flat_array);
 823     do_oop_load(_masm,
 824                 Address(array, index,
 825                         UseCompressedOops ? Address::times_4 : Address::times_ptr,
 826                         arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
 827                 rax,
 828                 IN_HEAP_ARRAY);
 829     __ jmp(done);
 830     __ bind(is_flat_array);
 831     __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_load), array, index);
 832     __ bind(done);
 833   } else {
 834     do_oop_load(_masm,
 835                 Address(array, index,
 836                         UseCompressedOops ? Address::times_4 : Address::times_ptr,
 837                         arrayOopDesc::base_offset_in_bytes(T_OBJECT)),
 838                 rax,
 839                 IN_HEAP_ARRAY);
 840   }
 841 }
 842 
 843 void TemplateTable::baload() {
 844   transition(itos, itos);
 845   // rax: index
 846   // rdx: array
 847   index_check(rdx, rax); // kills rbx
 848   __ load_signed_byte(rax, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
 849 }
 850 
 851 void TemplateTable::caload() {
 852   transition(itos, itos);
 853   // rax: index
 854   // rdx: array
 855   index_check(rdx, rax); // kills rbx
 856   __ load_unsigned_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
 857 }
 858 
 859 // iload followed by caload frequent pair
 860 void TemplateTable::fast_icaload() {
 861   transition(vtos, itos);
 862   // load index out of locals
 863   locals_index(rbx);
 864   __ movl(rax, iaddress(rbx));
 865 
 866   // rax: index
 867   // rdx: array
 868   index_check(rdx, rax); // kills rbx
 869   __ load_unsigned_short(rax,
 870                          Address(rdx, rax,
 871                                  Address::times_2,
 872                                  arrayOopDesc::base_offset_in_bytes(T_CHAR)));
 873 }
 874 
 875 
 876 void TemplateTable::saload() {
 877   transition(itos, itos);
 878   // rax: index
 879   // rdx: array
 880   index_check(rdx, rax); // kills rbx
 881   __ load_signed_short(rax, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
 882 }
 883 
 884 void TemplateTable::iload(int n) {
 885   transition(vtos, itos);
 886   __ movl(rax, iaddress(n));
 887 }
 888 
 889 void TemplateTable::lload(int n) {
 890   transition(vtos, ltos);
 891   __ movptr(rax, laddress(n));
 892   NOT_LP64(__ movptr(rdx, haddress(n)));
 893 }
 894 
 895 void TemplateTable::fload(int n) {
 896   transition(vtos, ftos);
 897   __ load_float(faddress(n));
 898 }
 899 
 900 void TemplateTable::dload(int n) {
 901   transition(vtos, dtos);
 902   __ load_double(daddress(n));
 903 }
 904 
 905 void TemplateTable::aload(int n) {
 906   transition(vtos, atos);
 907   __ movptr(rax, aaddress(n));
 908 }
 909 
 910 void TemplateTable::aload_0() {
 911   aload_0_internal();
 912 }
 913 
 914 void TemplateTable::nofast_aload_0() {
 915   aload_0_internal(may_not_rewrite);
 916 }
 917 
 918 void TemplateTable::aload_0_internal(RewriteControl rc) {
 919   transition(vtos, atos);
 920   // According to bytecode histograms, the pairs:
 921   //
 922   // _aload_0, _fast_igetfield
 923   // _aload_0, _fast_agetfield
 924   // _aload_0, _fast_fgetfield
 925   //
 926   // occur frequently. If RewriteFrequentPairs is set, the (slow)
 927   // _aload_0 bytecode checks if the next bytecode is either
 928   // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
 929   // rewrites the current bytecode into a pair bytecode; otherwise it
 930   // rewrites the current bytecode into _fast_aload_0 that doesn't do
 931   // the pair check anymore.
 932   //
 933   // Note: If the next bytecode is _getfield, the rewrite must be
 934   //       delayed, otherwise we may miss an opportunity for a pair.
 935   //
 936   // Also rewrite frequent pairs
 937   //   aload_0, aload_1
 938   //   aload_0, iload_1
 939   // These bytecodes with a small amount of code are most profitable
 940   // to rewrite
 941   if (RewriteFrequentPairs && rc == may_rewrite) {
 942     Label rewrite, done;
 943 
 944     const Register bc = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
 945     LP64_ONLY(assert(rbx != bc, "register damaged"));
 946 
 947     // get next byte
 948     __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 949 
 950     // if _getfield then wait with rewrite
 951     __ cmpl(rbx, Bytecodes::_getfield);
 952     __ jcc(Assembler::equal, done);
 953 
 954     // if _igetfield then rewrite to _fast_iaccess_0
 955     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 956     __ cmpl(rbx, Bytecodes::_fast_igetfield);
 957     __ movl(bc, Bytecodes::_fast_iaccess_0);
 958     __ jccb(Assembler::equal, rewrite);
 959 
 960     // if _agetfield then rewrite to _fast_aaccess_0
 961     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 962     assert(ValueTypesBufferMaxMemory == 0, "Such rewritting doesn't support flattened values yet");
 963     __ cmpl(rbx, Bytecodes::_fast_agetfield);
 964     __ movl(bc, Bytecodes::_fast_aaccess_0);
 965     __ jccb(Assembler::equal, rewrite);
 966 
 967     // if _fgetfield then rewrite to _fast_faccess_0
 968     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 969     __ cmpl(rbx, Bytecodes::_fast_fgetfield);
 970     __ movl(bc, Bytecodes::_fast_faccess_0);
 971     __ jccb(Assembler::equal, rewrite);
 972 
 973     // else rewrite to _fast_aload0
 974     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
 975     __ movl(bc, Bytecodes::_fast_aload_0);
 976 
 977     // rewrite
 978     // bc: fast bytecode
 979     __ bind(rewrite);
 980     patch_bytecode(Bytecodes::_aload_0, bc, rbx, false);
 981 
 982     __ bind(done);
 983   }
 984 
 985   // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
 986   aload(0);
 987 }
 988 
 989 void TemplateTable::istore() {
 990   transition(itos, vtos);
 991   locals_index(rbx);
 992   __ movl(iaddress(rbx), rax);
 993 }
 994 
 995 
 996 void TemplateTable::lstore() {
 997   transition(ltos, vtos);
 998   locals_index(rbx);
 999   __ movptr(laddress(rbx), rax);
1000   NOT_LP64(__ movptr(haddress(rbx), rdx));
1001 }
1002 
1003 void TemplateTable::fstore() {
1004   transition(ftos, vtos);
1005   locals_index(rbx);
1006   __ store_float(faddress(rbx));
1007 }
1008 
1009 void TemplateTable::dstore() {
1010   transition(dtos, vtos);
1011   locals_index(rbx);
1012   __ store_double(daddress(rbx));
1013 }
1014 
1015 void TemplateTable::astore() {
1016   transition(vtos, vtos);
1017   __ pop_ptr(rax);
1018   locals_index(rbx);
1019   __ movptr(aaddress(rbx), rax);
1020 }
1021 
1022 void TemplateTable::wide_istore() {
1023   transition(vtos, vtos);
1024   __ pop_i();
1025   locals_index_wide(rbx);
1026   __ movl(iaddress(rbx), rax);
1027 }
1028 
1029 void TemplateTable::wide_lstore() {
1030   transition(vtos, vtos);
1031   NOT_LP64(__ pop_l(rax, rdx));
1032   LP64_ONLY(__ pop_l());
1033   locals_index_wide(rbx);
1034   __ movptr(laddress(rbx), rax);
1035   NOT_LP64(__ movl(haddress(rbx), rdx));
1036 }
1037 
1038 void TemplateTable::wide_fstore() {
1039 #ifdef _LP64
1040   transition(vtos, vtos);
1041   __ pop_f(xmm0);
1042   locals_index_wide(rbx);
1043   __ movflt(faddress(rbx), xmm0);
1044 #else
1045   wide_istore();
1046 #endif
1047 }
1048 
1049 void TemplateTable::wide_dstore() {
1050 #ifdef _LP64
1051   transition(vtos, vtos);
1052   __ pop_d(xmm0);
1053   locals_index_wide(rbx);
1054   __ movdbl(daddress(rbx), xmm0);
1055 #else
1056   wide_lstore();
1057 #endif
1058 }
1059 
1060 void TemplateTable::wide_astore() {
1061   transition(vtos, vtos);
1062   __ pop_ptr(rax);
1063   locals_index_wide(rbx);
1064   __ movptr(aaddress(rbx), rax);
1065 }
1066 
1067 void TemplateTable::iastore() {
1068   transition(itos, vtos);
1069   __ pop_i(rbx);
1070   // rax: value
1071   // rbx: index
1072   // rdx: array
1073   index_check(rdx, rbx); // prefer index in rbx
1074   __ movl(Address(rdx, rbx,
1075                   Address::times_4,
1076                   arrayOopDesc::base_offset_in_bytes(T_INT)),
1077           rax);
1078 }
1079 
1080 void TemplateTable::lastore() {
1081   transition(ltos, vtos);
1082   __ pop_i(rbx);
1083   // rax,: low(value)
1084   // rcx: array
1085   // rdx: high(value)
1086   index_check(rcx, rbx);  // prefer index in rbx,
1087   // rbx,: index
1088   __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
1089   NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
1090 }
1091 
1092 
1093 void TemplateTable::fastore() {
1094   transition(ftos, vtos);
1095   __ pop_i(rbx);
1096   // value is in UseSSE >= 1 ? xmm0 : ST(0)
1097   // rbx:  index
1098   // rdx:  array
1099   index_check(rdx, rbx); // prefer index in rbx
1100   __ store_float(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
1101 }
1102 
1103 void TemplateTable::dastore() {
1104   transition(dtos, vtos);
1105   __ pop_i(rbx);
1106   // value is in UseSSE >= 2 ? xmm0 : ST(0)
1107   // rbx:  index
1108   // rdx:  array
1109   index_check(rdx, rbx); // prefer index in rbx
1110   __ store_double(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
1111 }
1112 
1113 void TemplateTable::aastore() {
1114   Label is_null, is_flat_array, ok_is_subtype, done;
1115   transition(vtos, vtos);
1116   // stack: ..., array, index, value
1117   __ movptr(rax, at_tos());    // value
1118   __ movl(rcx, at_tos_p1()); // index
1119   __ movptr(rdx, at_tos_p2()); // array
1120 
1121   Address element_address(rdx, rcx,
1122                           UseCompressedOops? Address::times_4 : Address::times_ptr,
1123                           arrayOopDesc::base_offset_in_bytes(T_OBJECT));
1124 
1125   index_check_without_pop(rdx, rcx);     // kills rbx
1126 
1127   __ testptr(rax, rax);
1128   __ jcc(Assembler::zero, is_null);
1129 
1130   // Move array class to rdi
1131   __ load_klass(rdi, rdx);
1132   if (ValueArrayFlatten) {
1133     __ test_flat_array_klass(rdi, rbx, is_flat_array);
1134   }
1135 
1136   // Move subklass into rbx
1137   __ load_klass(rbx, rax);
1138   // Move array element superklass into rax
1139   __ movptr(rax, Address(rdi,
1140                          ObjArrayKlass::element_klass_offset()));
1141 
1142   // Generate subtype check.  Blows rcx, rdi
1143   // Superklass in rax.  Subklass in rbx.
1144   // is "rbx <: rax" ? (value subclass <: array element superclass)
1145   __ gen_subtype_check(rbx, ok_is_subtype);
1146 
1147   // Come here on failure
1148   // object is at TOS
1149   __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
1150 
1151   // Come here on success
1152   __ bind(ok_is_subtype);
1153 
1154   // Get the value we will store
1155   __ movptr(rax, at_tos());
1156   if (ValueTypesBufferMaxMemory > 0) {
1157     Label is_on_heap;
1158     __ test_value_is_not_buffered(rax, rbx, is_on_heap);
1159     __ push(rdx); // save precomputed element address, and convert buffer oop to heap oop
1160     __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_heap_copy), rax);
1161     __ pop(rdx);
1162     __ bind(is_on_heap);
1163   }
1164   __ movl(rcx, at_tos_p1()); // index
1165   // Now store using the appropriate barrier
1166   do_oop_store(_masm, element_address, rax, IN_HEAP_ARRAY);
1167   __ jmp(done);
1168 
1169   // Have a NULL in rax, rdx=array, ecx=index.  Store NULL at ary[idx]
1170   __ bind(is_null);
1171   __ profile_null_seen(rbx);
1172   if (EnableValhalla) {
1173     Label is_null_into_value_array_npe, store_null;
1174 
1175     __ load_klass(rdi, rdx);
1176     // No way to store null in flat array
1177     __ test_flat_array_klass(rdi, rbx, is_null_into_value_array_npe);
1178 
1179     // Use case for storing values in objArray where element_klass is specifically
1180     // a value type because they could not be flattened "for reasons",
1181     // these need to have the same semantics as flat arrays, i.e. NPE
1182     __ movptr(rdi, Address(rdi, ObjArrayKlass::element_klass_offset()));
1183     __ test_klass_is_value(rdi, rdi, is_null_into_value_array_npe);
1184     __ jmp(store_null);
1185 
1186     __ bind(is_null_into_value_array_npe);
1187     __ jump(ExternalAddress(Interpreter::_throw_NullPointerException_entry));
1188 
1189     __ bind(store_null);
1190   }
1191   // Store a NULL
1192   do_oop_store(_masm, element_address, noreg, IN_HEAP_ARRAY);
1193   __ jmp(done);
1194 
1195   if (EnableValhalla) {
1196     Label is_type_ok;
1197     __ bind(is_flat_array); // Store non-null value to flat
1198 
1199     // Simplistic type check...
1200 
1201     // Profile the not-null value's klass.
1202     __ load_klass(rbx, rax);
1203     __ profile_typecheck(rcx, rbx, rax); // blows rcx, and rax
1204     // Move element klass into rax
1205     __ movptr(rax, Address(rdi, ArrayKlass::element_klass_offset()));
1206     // flat value array needs exact type match
1207     // is "rax == rbx" (value subclass == array element superclass)
1208     __ cmpptr(rax, rbx);
1209     __ jccb(Assembler::equal, is_type_ok);
1210 
1211     __ profile_typecheck_failed(rcx);
1212     __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
1213 
1214     __ bind(is_type_ok);
1215     __ movptr(rax, at_tos());  // value
1216     __ movl(rcx, at_tos_p1()); // index
1217     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::value_array_store), rax, rdx, rcx);
1218   }
1219   // Pop stack arguments
1220   __ bind(done);
1221   __ addptr(rsp, 3 * Interpreter::stackElementSize);
1222 }
1223 
1224 void TemplateTable::bastore() {
1225   transition(itos, vtos);
1226   __ pop_i(rbx);
1227   // rax: value
1228   // rbx: index
1229   // rdx: array
1230   index_check(rdx, rbx); // prefer index in rbx
1231   // Need to check whether array is boolean or byte
1232   // since both types share the bastore bytecode.
1233   __ load_klass(rcx, rdx);
1234   __ movl(rcx, Address(rcx, Klass::layout_helper_offset()));
1235   int diffbit = Klass::layout_helper_boolean_diffbit();
1236   __ testl(rcx, diffbit);
1237   Label L_skip;
1238   __ jccb(Assembler::zero, L_skip);
1239   __ andl(rax, 1);  // if it is a T_BOOLEAN array, mask the stored value to 0/1
1240   __ bind(L_skip);
1241   __ movb(Address(rdx, rbx,
1242                   Address::times_1,
1243                   arrayOopDesc::base_offset_in_bytes(T_BYTE)),
1244           rax);
1245 }
1246 
1247 void TemplateTable::castore() {
1248   transition(itos, vtos);
1249   __ pop_i(rbx);
1250   // rax: value
1251   // rbx: index
1252   // rdx: array
1253   index_check(rdx, rbx);  // prefer index in rbx
1254   __ movw(Address(rdx, rbx,
1255                   Address::times_2,
1256                   arrayOopDesc::base_offset_in_bytes(T_CHAR)),
1257           rax);
1258 }
1259 
1260 
1261 void TemplateTable::sastore() {
1262   castore();
1263 }
1264 
1265 void TemplateTable::istore(int n) {
1266   transition(itos, vtos);
1267   __ movl(iaddress(n), rax);
1268 }
1269 
1270 void TemplateTable::lstore(int n) {
1271   transition(ltos, vtos);
1272   __ movptr(laddress(n), rax);
1273   NOT_LP64(__ movptr(haddress(n), rdx));
1274 }
1275 
1276 void TemplateTable::fstore(int n) {
1277   transition(ftos, vtos);
1278   __ store_float(faddress(n));
1279 }
1280 
1281 void TemplateTable::dstore(int n) {
1282   transition(dtos, vtos);
1283   __ store_double(daddress(n));
1284 }
1285 
1286 
1287 void TemplateTable::astore(int n) {
1288   transition(vtos, vtos);
1289   __ pop_ptr(rax);
1290   __ movptr(aaddress(n), rax);
1291 }
1292 
1293 void TemplateTable::pop() {
1294   transition(vtos, vtos);
1295   __ addptr(rsp, Interpreter::stackElementSize);
1296 }
1297 
1298 void TemplateTable::pop2() {
1299   transition(vtos, vtos);
1300   __ addptr(rsp, 2 * Interpreter::stackElementSize);
1301 }
1302 
1303 
1304 void TemplateTable::dup() {
1305   transition(vtos, vtos);
1306   __ load_ptr(0, rax);
1307   __ push_ptr(rax);
1308   // stack: ..., a, a
1309 }
1310 
1311 void TemplateTable::dup_x1() {
1312   transition(vtos, vtos);
1313   // stack: ..., a, b
1314   __ load_ptr( 0, rax);  // load b
1315   __ load_ptr( 1, rcx);  // load a
1316   __ store_ptr(1, rax);  // store b
1317   __ store_ptr(0, rcx);  // store a
1318   __ push_ptr(rax);      // push b
1319   // stack: ..., b, a, b
1320 }
1321 
1322 void TemplateTable::dup_x2() {
1323   transition(vtos, vtos);
1324   // stack: ..., a, b, c
1325   __ load_ptr( 0, rax);  // load c
1326   __ load_ptr( 2, rcx);  // load a
1327   __ store_ptr(2, rax);  // store c in a
1328   __ push_ptr(rax);      // push c
1329   // stack: ..., c, b, c, c
1330   __ load_ptr( 2, rax);  // load b
1331   __ store_ptr(2, rcx);  // store a in b
1332   // stack: ..., c, a, c, c
1333   __ store_ptr(1, rax);  // store b in c
1334   // stack: ..., c, a, b, c
1335 }
1336 
1337 void TemplateTable::dup2() {
1338   transition(vtos, vtos);
1339   // stack: ..., a, b
1340   __ load_ptr(1, rax);  // load a
1341   __ push_ptr(rax);     // push a
1342   __ load_ptr(1, rax);  // load b
1343   __ push_ptr(rax);     // push b
1344   // stack: ..., a, b, a, b
1345 }
1346 
1347 
1348 void TemplateTable::dup2_x1() {
1349   transition(vtos, vtos);
1350   // stack: ..., a, b, c
1351   __ load_ptr( 0, rcx);  // load c
1352   __ load_ptr( 1, rax);  // load b
1353   __ push_ptr(rax);      // push b
1354   __ push_ptr(rcx);      // push c
1355   // stack: ..., a, b, c, b, c
1356   __ store_ptr(3, rcx);  // store c in b
1357   // stack: ..., a, c, c, b, c
1358   __ load_ptr( 4, rcx);  // load a
1359   __ store_ptr(2, rcx);  // store a in 2nd c
1360   // stack: ..., a, c, a, b, c
1361   __ store_ptr(4, rax);  // store b in a
1362   // stack: ..., b, c, a, b, c
1363 }
1364 
1365 void TemplateTable::dup2_x2() {
1366   transition(vtos, vtos);
1367   // stack: ..., a, b, c, d
1368   __ load_ptr( 0, rcx);  // load d
1369   __ load_ptr( 1, rax);  // load c
1370   __ push_ptr(rax);      // push c
1371   __ push_ptr(rcx);      // push d
1372   // stack: ..., a, b, c, d, c, d
1373   __ load_ptr( 4, rax);  // load b
1374   __ store_ptr(2, rax);  // store b in d
1375   __ store_ptr(4, rcx);  // store d in b
1376   // stack: ..., a, d, c, b, c, d
1377   __ load_ptr( 5, rcx);  // load a
1378   __ load_ptr( 3, rax);  // load c
1379   __ store_ptr(3, rcx);  // store a in c
1380   __ store_ptr(5, rax);  // store c in a
1381   // stack: ..., c, d, a, b, c, d
1382 }
1383 
1384 void TemplateTable::swap() {
1385   transition(vtos, vtos);
1386   // stack: ..., a, b
1387   __ load_ptr( 1, rcx);  // load a
1388   __ load_ptr( 0, rax);  // load b
1389   __ store_ptr(0, rcx);  // store a in b
1390   __ store_ptr(1, rax);  // store b in a
1391   // stack: ..., b, a
1392 }
1393 
1394 void TemplateTable::iop2(Operation op) {
1395   transition(itos, itos);
1396   switch (op) {
1397   case add  :                    __ pop_i(rdx); __ addl (rax, rdx); break;
1398   case sub  : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1399   case mul  :                    __ pop_i(rdx); __ imull(rax, rdx); break;
1400   case _and :                    __ pop_i(rdx); __ andl (rax, rdx); break;
1401   case _or  :                    __ pop_i(rdx); __ orl  (rax, rdx); break;
1402   case _xor :                    __ pop_i(rdx); __ xorl (rax, rdx); break;
1403   case shl  : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax);      break;
1404   case shr  : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax);      break;
1405   case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax);      break;
1406   default   : ShouldNotReachHere();
1407   }
1408 }
1409 
1410 void TemplateTable::lop2(Operation op) {
1411   transition(ltos, ltos);
1412 #ifdef _LP64
1413   switch (op) {
1414   case add  :                    __ pop_l(rdx); __ addptr(rax, rdx); break;
1415   case sub  : __ mov(rdx, rax);  __ pop_l(rax); __ subptr(rax, rdx); break;
1416   case _and :                    __ pop_l(rdx); __ andptr(rax, rdx); break;
1417   case _or  :                    __ pop_l(rdx); __ orptr (rax, rdx); break;
1418   case _xor :                    __ pop_l(rdx); __ xorptr(rax, rdx); break;
1419   default   : ShouldNotReachHere();
1420   }
1421 #else
1422   __ pop_l(rbx, rcx);
1423   switch (op) {
1424     case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1425     case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1426                 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1427     case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1428     case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1429     case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1430     default   : ShouldNotReachHere();
1431   }
1432 #endif
1433 }
1434 
1435 void TemplateTable::idiv() {
1436   transition(itos, itos);
1437   __ movl(rcx, rax);
1438   __ pop_i(rax);
1439   // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1440   //       they are not equal, one could do a normal division (no correction
1441   //       needed), which may speed up this implementation for the common case.
1442   //       (see also JVM spec., p.243 & p.271)
1443   __ corrected_idivl(rcx);
1444 }
1445 
1446 void TemplateTable::irem() {
1447   transition(itos, itos);
1448   __ movl(rcx, rax);
1449   __ pop_i(rax);
1450   // Note: could xor rax and ecx and compare with (-1 ^ min_int). If
1451   //       they are not equal, one could do a normal division (no correction
1452   //       needed), which may speed up this implementation for the common case.
1453   //       (see also JVM spec., p.243 & p.271)
1454   __ corrected_idivl(rcx);
1455   __ movl(rax, rdx);
1456 }
1457 
1458 void TemplateTable::lmul() {
1459   transition(ltos, ltos);
1460 #ifdef _LP64
1461   __ pop_l(rdx);
1462   __ imulq(rax, rdx);
1463 #else
1464   __ pop_l(rbx, rcx);
1465   __ push(rcx); __ push(rbx);
1466   __ push(rdx); __ push(rax);
1467   __ lmul(2 * wordSize, 0);
1468   __ addptr(rsp, 4 * wordSize);  // take off temporaries
1469 #endif
1470 }
1471 
1472 void TemplateTable::ldiv() {
1473   transition(ltos, ltos);
1474 #ifdef _LP64
1475   __ mov(rcx, rax);
1476   __ pop_l(rax);
1477   // generate explicit div0 check
1478   __ testq(rcx, rcx);
1479   __ jump_cc(Assembler::zero,
1480              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1481   // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1482   //       they are not equal, one could do a normal division (no correction
1483   //       needed), which may speed up this implementation for the common case.
1484   //       (see also JVM spec., p.243 & p.271)
1485   __ corrected_idivq(rcx); // kills rbx
1486 #else
1487   __ pop_l(rbx, rcx);
1488   __ push(rcx); __ push(rbx);
1489   __ push(rdx); __ push(rax);
1490   // check if y = 0
1491   __ orl(rax, rdx);
1492   __ jump_cc(Assembler::zero,
1493              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1494   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1495   __ addptr(rsp, 4 * wordSize);  // take off temporaries
1496 #endif
1497 }
1498 
1499 void TemplateTable::lrem() {
1500   transition(ltos, ltos);
1501 #ifdef _LP64
1502   __ mov(rcx, rax);
1503   __ pop_l(rax);
1504   __ testq(rcx, rcx);
1505   __ jump_cc(Assembler::zero,
1506              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1507   // Note: could xor rax and rcx and compare with (-1 ^ min_int). If
1508   //       they are not equal, one could do a normal division (no correction
1509   //       needed), which may speed up this implementation for the common case.
1510   //       (see also JVM spec., p.243 & p.271)
1511   __ corrected_idivq(rcx); // kills rbx
1512   __ mov(rax, rdx);
1513 #else
1514   __ pop_l(rbx, rcx);
1515   __ push(rcx); __ push(rbx);
1516   __ push(rdx); __ push(rax);
1517   // check if y = 0
1518   __ orl(rax, rdx);
1519   __ jump_cc(Assembler::zero,
1520              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1521   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1522   __ addptr(rsp, 4 * wordSize);
1523 #endif
1524 }
1525 
1526 void TemplateTable::lshl() {
1527   transition(itos, ltos);
1528   __ movl(rcx, rax);                             // get shift count
1529   #ifdef _LP64
1530   __ pop_l(rax);                                 // get shift value
1531   __ shlq(rax);
1532 #else
1533   __ pop_l(rax, rdx);                            // get shift value
1534   __ lshl(rdx, rax);
1535 #endif
1536 }
1537 
1538 void TemplateTable::lshr() {
1539 #ifdef _LP64
1540   transition(itos, ltos);
1541   __ movl(rcx, rax);                             // get shift count
1542   __ pop_l(rax);                                 // get shift value
1543   __ sarq(rax);
1544 #else
1545   transition(itos, ltos);
1546   __ mov(rcx, rax);                              // get shift count
1547   __ pop_l(rax, rdx);                            // get shift value
1548   __ lshr(rdx, rax, true);
1549 #endif
1550 }
1551 
1552 void TemplateTable::lushr() {
1553   transition(itos, ltos);
1554 #ifdef _LP64
1555   __ movl(rcx, rax);                             // get shift count
1556   __ pop_l(rax);                                 // get shift value
1557   __ shrq(rax);
1558 #else
1559   __ mov(rcx, rax);                              // get shift count
1560   __ pop_l(rax, rdx);                            // get shift value
1561   __ lshr(rdx, rax);
1562 #endif
1563 }
1564 
1565 void TemplateTable::fop2(Operation op) {
1566   transition(ftos, ftos);
1567 
1568   if (UseSSE >= 1) {
1569     switch (op) {
1570     case add:
1571       __ addss(xmm0, at_rsp());
1572       __ addptr(rsp, Interpreter::stackElementSize);
1573       break;
1574     case sub:
1575       __ movflt(xmm1, xmm0);
1576       __ pop_f(xmm0);
1577       __ subss(xmm0, xmm1);
1578       break;
1579     case mul:
1580       __ mulss(xmm0, at_rsp());
1581       __ addptr(rsp, Interpreter::stackElementSize);
1582       break;
1583     case div:
1584       __ movflt(xmm1, xmm0);
1585       __ pop_f(xmm0);
1586       __ divss(xmm0, xmm1);
1587       break;
1588     case rem:
1589       // On x86_64 platforms the SharedRuntime::frem method is called to perform the
1590       // modulo operation. The frem method calls the function
1591       // double fmod(double x, double y) in math.h. The documentation of fmod states:
1592       // "If x or y is a NaN, a NaN is returned." without specifying what type of NaN
1593       // (signalling or quiet) is returned.
1594       //
1595       // On x86_32 platforms the FPU is used to perform the modulo operation. The
1596       // reason is that on 32-bit Windows the sign of modulo operations diverges from
1597       // what is considered the standard (e.g., -0.0f % -3.14f is 0.0f (and not -0.0f).
1598       // The fprem instruction used on x86_32 is functionally equivalent to
1599       // SharedRuntime::frem in that it returns a NaN.
1600 #ifdef _LP64
1601       __ movflt(xmm1, xmm0);
1602       __ pop_f(xmm0);
1603       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem), 2);
1604 #else
1605       __ push_f(xmm0);
1606       __ pop_f();
1607       __ fld_s(at_rsp());
1608       __ fremr(rax);
1609       __ f2ieee();
1610       __ pop(rax);  // pop second operand off the stack
1611       __ push_f();
1612       __ pop_f(xmm0);
1613 #endif
1614       break;
1615     default:
1616       ShouldNotReachHere();
1617       break;
1618     }
1619   } else {
1620 #ifdef _LP64
1621     ShouldNotReachHere();
1622 #else
1623     switch (op) {
1624     case add: __ fadd_s (at_rsp());                break;
1625     case sub: __ fsubr_s(at_rsp());                break;
1626     case mul: __ fmul_s (at_rsp());                break;
1627     case div: __ fdivr_s(at_rsp());                break;
1628     case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
1629     default : ShouldNotReachHere();
1630     }
1631     __ f2ieee();
1632     __ pop(rax);  // pop second operand off the stack
1633 #endif // _LP64
1634   }
1635 }
1636 
1637 void TemplateTable::dop2(Operation op) {
1638   transition(dtos, dtos);
1639   if (UseSSE >= 2) {
1640     switch (op) {
1641     case add:
1642       __ addsd(xmm0, at_rsp());
1643       __ addptr(rsp, 2 * Interpreter::stackElementSize);
1644       break;
1645     case sub:
1646       __ movdbl(xmm1, xmm0);
1647       __ pop_d(xmm0);
1648       __ subsd(xmm0, xmm1);
1649       break;
1650     case mul:
1651       __ mulsd(xmm0, at_rsp());
1652       __ addptr(rsp, 2 * Interpreter::stackElementSize);
1653       break;
1654     case div:
1655       __ movdbl(xmm1, xmm0);
1656       __ pop_d(xmm0);
1657       __ divsd(xmm0, xmm1);
1658       break;
1659     case rem:
1660       // Similar to fop2(), the modulo operation is performed using the
1661       // SharedRuntime::drem method (on x86_64 platforms) or using the
1662       // FPU (on x86_32 platforms) for the same reasons as mentioned in fop2().
1663 #ifdef _LP64
1664       __ movdbl(xmm1, xmm0);
1665       __ pop_d(xmm0);
1666       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem), 2);
1667 #else
1668       __ push_d(xmm0);
1669       __ pop_d();
1670       __ fld_d(at_rsp());
1671       __ fremr(rax);
1672       __ d2ieee();
1673       __ pop(rax);
1674       __ pop(rdx);
1675       __ push_d();
1676       __ pop_d(xmm0);
1677 #endif
1678       break;
1679     default:
1680       ShouldNotReachHere();
1681       break;
1682     }
1683   } else {
1684 #ifdef _LP64
1685     ShouldNotReachHere();
1686 #else
1687     switch (op) {
1688     case add: __ fadd_d (at_rsp());                break;
1689     case sub: __ fsubr_d(at_rsp());                break;
1690     case mul: {
1691       Label L_strict;
1692       Label L_join;
1693       const Address access_flags      (rcx, Method::access_flags_offset());
1694       __ get_method(rcx);
1695       __ movl(rcx, access_flags);
1696       __ testl(rcx, JVM_ACC_STRICT);
1697       __ jccb(Assembler::notZero, L_strict);
1698       __ fmul_d (at_rsp());
1699       __ jmpb(L_join);
1700       __ bind(L_strict);
1701       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1702       __ fmulp();
1703       __ fmul_d (at_rsp());
1704       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1705       __ fmulp();
1706       __ bind(L_join);
1707       break;
1708     }
1709     case div: {
1710       Label L_strict;
1711       Label L_join;
1712       const Address access_flags      (rcx, Method::access_flags_offset());
1713       __ get_method(rcx);
1714       __ movl(rcx, access_flags);
1715       __ testl(rcx, JVM_ACC_STRICT);
1716       __ jccb(Assembler::notZero, L_strict);
1717       __ fdivr_d(at_rsp());
1718       __ jmp(L_join);
1719       __ bind(L_strict);
1720       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1721       __ fmul_d (at_rsp());
1722       __ fdivrp();
1723       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1724       __ fmulp();
1725       __ bind(L_join);
1726       break;
1727     }
1728     case rem: __ fld_d  (at_rsp()); __ fremr(rax); break;
1729     default : ShouldNotReachHere();
1730     }
1731     __ d2ieee();
1732     // Pop double precision number from rsp.
1733     __ pop(rax);
1734     __ pop(rdx);
1735 #endif
1736   }
1737 }
1738 
1739 void TemplateTable::ineg() {
1740   transition(itos, itos);
1741   __ negl(rax);
1742 }
1743 
1744 void TemplateTable::lneg() {
1745   transition(ltos, ltos);
1746   LP64_ONLY(__ negq(rax));
1747   NOT_LP64(__ lneg(rdx, rax));
1748 }
1749 
1750 // Note: 'double' and 'long long' have 32-bits alignment on x86.
1751 static jlong* double_quadword(jlong *adr, jlong lo, jlong hi) {
1752   // Use the expression (adr)&(~0xF) to provide 128-bits aligned address
1753   // of 128-bits operands for SSE instructions.
1754   jlong *operand = (jlong*)(((intptr_t)adr)&((intptr_t)(~0xF)));
1755   // Store the value to a 128-bits operand.
1756   operand[0] = lo;
1757   operand[1] = hi;
1758   return operand;
1759 }
1760 
1761 // Buffer for 128-bits masks used by SSE instructions.
1762 static jlong float_signflip_pool[2*2];
1763 static jlong double_signflip_pool[2*2];
1764 
1765 void TemplateTable::fneg() {
1766   transition(ftos, ftos);
1767   if (UseSSE >= 1) {
1768     static jlong *float_signflip  = double_quadword(&float_signflip_pool[1],  CONST64(0x8000000080000000),  CONST64(0x8000000080000000));
1769     __ xorps(xmm0, ExternalAddress((address) float_signflip));
1770   } else {
1771     LP64_ONLY(ShouldNotReachHere());
1772     NOT_LP64(__ fchs());
1773   }
1774 }
1775 
1776 void TemplateTable::dneg() {
1777   transition(dtos, dtos);
1778   if (UseSSE >= 2) {
1779     static jlong *double_signflip =
1780       double_quadword(&double_signflip_pool[1], CONST64(0x8000000000000000), CONST64(0x8000000000000000));
1781     __ xorpd(xmm0, ExternalAddress((address) double_signflip));
1782   } else {
1783 #ifdef _LP64
1784     ShouldNotReachHere();
1785 #else
1786     __ fchs();
1787 #endif
1788   }
1789 }
1790 
1791 void TemplateTable::iinc() {
1792   transition(vtos, vtos);
1793   __ load_signed_byte(rdx, at_bcp(2)); // get constant
1794   locals_index(rbx);
1795   __ addl(iaddress(rbx), rdx);
1796 }
1797 
1798 void TemplateTable::wide_iinc() {
1799   transition(vtos, vtos);
1800   __ movl(rdx, at_bcp(4)); // get constant
1801   locals_index_wide(rbx);
1802   __ bswapl(rdx); // swap bytes & sign-extend constant
1803   __ sarl(rdx, 16);
1804   __ addl(iaddress(rbx), rdx);
1805   // Note: should probably use only one movl to get both
1806   //       the index and the constant -> fix this
1807 }
1808 
1809 void TemplateTable::convert() {
1810 #ifdef _LP64
1811   // Checking
1812 #ifdef ASSERT
1813   {
1814     TosState tos_in  = ilgl;
1815     TosState tos_out = ilgl;
1816     switch (bytecode()) {
1817     case Bytecodes::_i2l: // fall through
1818     case Bytecodes::_i2f: // fall through
1819     case Bytecodes::_i2d: // fall through
1820     case Bytecodes::_i2b: // fall through
1821     case Bytecodes::_i2c: // fall through
1822     case Bytecodes::_i2s: tos_in = itos; break;
1823     case Bytecodes::_l2i: // fall through
1824     case Bytecodes::_l2f: // fall through
1825     case Bytecodes::_l2d: tos_in = ltos; break;
1826     case Bytecodes::_f2i: // fall through
1827     case Bytecodes::_f2l: // fall through
1828     case Bytecodes::_f2d: tos_in = ftos; break;
1829     case Bytecodes::_d2i: // fall through
1830     case Bytecodes::_d2l: // fall through
1831     case Bytecodes::_d2f: tos_in = dtos; break;
1832     default             : ShouldNotReachHere();
1833     }
1834     switch (bytecode()) {
1835     case Bytecodes::_l2i: // fall through
1836     case Bytecodes::_f2i: // fall through
1837     case Bytecodes::_d2i: // fall through
1838     case Bytecodes::_i2b: // fall through
1839     case Bytecodes::_i2c: // fall through
1840     case Bytecodes::_i2s: tos_out = itos; break;
1841     case Bytecodes::_i2l: // fall through
1842     case Bytecodes::_f2l: // fall through
1843     case Bytecodes::_d2l: tos_out = ltos; break;
1844     case Bytecodes::_i2f: // fall through
1845     case Bytecodes::_l2f: // fall through
1846     case Bytecodes::_d2f: tos_out = ftos; break;
1847     case Bytecodes::_i2d: // fall through
1848     case Bytecodes::_l2d: // fall through
1849     case Bytecodes::_f2d: tos_out = dtos; break;
1850     default             : ShouldNotReachHere();
1851     }
1852     transition(tos_in, tos_out);
1853   }
1854 #endif // ASSERT
1855 
1856   static const int64_t is_nan = 0x8000000000000000L;
1857 
1858   // Conversion
1859   switch (bytecode()) {
1860   case Bytecodes::_i2l:
1861     __ movslq(rax, rax);
1862     break;
1863   case Bytecodes::_i2f:
1864     __ cvtsi2ssl(xmm0, rax);
1865     break;
1866   case Bytecodes::_i2d:
1867     __ cvtsi2sdl(xmm0, rax);
1868     break;
1869   case Bytecodes::_i2b:
1870     __ movsbl(rax, rax);
1871     break;
1872   case Bytecodes::_i2c:
1873     __ movzwl(rax, rax);
1874     break;
1875   case Bytecodes::_i2s:
1876     __ movswl(rax, rax);
1877     break;
1878   case Bytecodes::_l2i:
1879     __ movl(rax, rax);
1880     break;
1881   case Bytecodes::_l2f:
1882     __ cvtsi2ssq(xmm0, rax);
1883     break;
1884   case Bytecodes::_l2d:
1885     __ cvtsi2sdq(xmm0, rax);
1886     break;
1887   case Bytecodes::_f2i:
1888   {
1889     Label L;
1890     __ cvttss2sil(rax, xmm0);
1891     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1892     __ jcc(Assembler::notEqual, L);
1893     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1894     __ bind(L);
1895   }
1896     break;
1897   case Bytecodes::_f2l:
1898   {
1899     Label L;
1900     __ cvttss2siq(rax, xmm0);
1901     // NaN or overflow/underflow?
1902     __ cmp64(rax, ExternalAddress((address) &is_nan));
1903     __ jcc(Assembler::notEqual, L);
1904     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1905     __ bind(L);
1906   }
1907     break;
1908   case Bytecodes::_f2d:
1909     __ cvtss2sd(xmm0, xmm0);
1910     break;
1911   case Bytecodes::_d2i:
1912   {
1913     Label L;
1914     __ cvttsd2sil(rax, xmm0);
1915     __ cmpl(rax, 0x80000000); // NaN or overflow/underflow?
1916     __ jcc(Assembler::notEqual, L);
1917     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 1);
1918     __ bind(L);
1919   }
1920     break;
1921   case Bytecodes::_d2l:
1922   {
1923     Label L;
1924     __ cvttsd2siq(rax, xmm0);
1925     // NaN or overflow/underflow?
1926     __ cmp64(rax, ExternalAddress((address) &is_nan));
1927     __ jcc(Assembler::notEqual, L);
1928     __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 1);
1929     __ bind(L);
1930   }
1931     break;
1932   case Bytecodes::_d2f:
1933     __ cvtsd2ss(xmm0, xmm0);
1934     break;
1935   default:
1936     ShouldNotReachHere();
1937   }
1938 #else
1939   // Checking
1940 #ifdef ASSERT
1941   { TosState tos_in  = ilgl;
1942     TosState tos_out = ilgl;
1943     switch (bytecode()) {
1944       case Bytecodes::_i2l: // fall through
1945       case Bytecodes::_i2f: // fall through
1946       case Bytecodes::_i2d: // fall through
1947       case Bytecodes::_i2b: // fall through
1948       case Bytecodes::_i2c: // fall through
1949       case Bytecodes::_i2s: tos_in = itos; break;
1950       case Bytecodes::_l2i: // fall through
1951       case Bytecodes::_l2f: // fall through
1952       case Bytecodes::_l2d: tos_in = ltos; break;
1953       case Bytecodes::_f2i: // fall through
1954       case Bytecodes::_f2l: // fall through
1955       case Bytecodes::_f2d: tos_in = ftos; break;
1956       case Bytecodes::_d2i: // fall through
1957       case Bytecodes::_d2l: // fall through
1958       case Bytecodes::_d2f: tos_in = dtos; break;
1959       default             : ShouldNotReachHere();
1960     }
1961     switch (bytecode()) {
1962       case Bytecodes::_l2i: // fall through
1963       case Bytecodes::_f2i: // fall through
1964       case Bytecodes::_d2i: // fall through
1965       case Bytecodes::_i2b: // fall through
1966       case Bytecodes::_i2c: // fall through
1967       case Bytecodes::_i2s: tos_out = itos; break;
1968       case Bytecodes::_i2l: // fall through
1969       case Bytecodes::_f2l: // fall through
1970       case Bytecodes::_d2l: tos_out = ltos; break;
1971       case Bytecodes::_i2f: // fall through
1972       case Bytecodes::_l2f: // fall through
1973       case Bytecodes::_d2f: tos_out = ftos; break;
1974       case Bytecodes::_i2d: // fall through
1975       case Bytecodes::_l2d: // fall through
1976       case Bytecodes::_f2d: tos_out = dtos; break;
1977       default             : ShouldNotReachHere();
1978     }
1979     transition(tos_in, tos_out);
1980   }
1981 #endif // ASSERT
1982 
1983   // Conversion
1984   // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1985   switch (bytecode()) {
1986     case Bytecodes::_i2l:
1987       __ extend_sign(rdx, rax);
1988       break;
1989     case Bytecodes::_i2f:
1990       if (UseSSE >= 1) {
1991         __ cvtsi2ssl(xmm0, rax);
1992       } else {
1993         __ push(rax);          // store int on tos
1994         __ fild_s(at_rsp());   // load int to ST0
1995         __ f2ieee();           // truncate to float size
1996         __ pop(rcx);           // adjust rsp
1997       }
1998       break;
1999     case Bytecodes::_i2d:
2000       if (UseSSE >= 2) {
2001         __ cvtsi2sdl(xmm0, rax);
2002       } else {
2003       __ push(rax);          // add one slot for d2ieee()
2004       __ push(rax);          // store int on tos
2005       __ fild_s(at_rsp());   // load int to ST0
2006       __ d2ieee();           // truncate to double size
2007       __ pop(rcx);           // adjust rsp
2008       __ pop(rcx);
2009       }
2010       break;
2011     case Bytecodes::_i2b:
2012       __ shll(rax, 24);      // truncate upper 24 bits
2013       __ sarl(rax, 24);      // and sign-extend byte
2014       LP64_ONLY(__ movsbl(rax, rax));
2015       break;
2016     case Bytecodes::_i2c:
2017       __ andl(rax, 0xFFFF);  // truncate upper 16 bits
2018       LP64_ONLY(__ movzwl(rax, rax));
2019       break;
2020     case Bytecodes::_i2s:
2021       __ shll(rax, 16);      // truncate upper 16 bits
2022       __ sarl(rax, 16);      // and sign-extend short
2023       LP64_ONLY(__ movswl(rax, rax));
2024       break;
2025     case Bytecodes::_l2i:
2026       /* nothing to do */
2027       break;
2028     case Bytecodes::_l2f:
2029       // On 64-bit platforms, the cvtsi2ssq instruction is used to convert
2030       // 64-bit long values to floats. On 32-bit platforms it is not possible
2031       // to use that instruction with 64-bit operands, therefore the FPU is
2032       // used to perform the conversion.
2033       __ push(rdx);          // store long on tos
2034       __ push(rax);
2035       __ fild_d(at_rsp());   // load long to ST0
2036       __ f2ieee();           // truncate to float size
2037       __ pop(rcx);           // adjust rsp
2038       __ pop(rcx);
2039       if (UseSSE >= 1) {
2040         __ push_f();
2041         __ pop_f(xmm0);
2042       }
2043       break;
2044     case Bytecodes::_l2d:
2045       // On 32-bit platforms the FPU is used for conversion because on
2046       // 32-bit platforms it is not not possible to use the cvtsi2sdq
2047       // instruction with 64-bit operands.
2048       __ push(rdx);          // store long on tos
2049       __ push(rax);
2050       __ fild_d(at_rsp());   // load long to ST0
2051       __ d2ieee();           // truncate to double size
2052       __ pop(rcx);           // adjust rsp
2053       __ pop(rcx);
2054       if (UseSSE >= 2) {
2055         __ push_d();
2056         __ pop_d(xmm0);
2057       }
2058       break;
2059     case Bytecodes::_f2i:
2060       // SharedRuntime::f2i does not differentiate between sNaNs and qNaNs
2061       // as it returns 0 for any NaN.
2062       if (UseSSE >= 1) {
2063         __ push_f(xmm0);
2064       } else {
2065         __ push(rcx);          // reserve space for argument
2066         __ fstp_s(at_rsp());   // pass float argument on stack
2067       }
2068       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
2069       break;
2070     case Bytecodes::_f2l:
2071       // SharedRuntime::f2l does not differentiate between sNaNs and qNaNs
2072       // as it returns 0 for any NaN.
2073       if (UseSSE >= 1) {
2074        __ push_f(xmm0);
2075       } else {
2076         __ push(rcx);          // reserve space for argument
2077         __ fstp_s(at_rsp());   // pass float argument on stack
2078       }
2079       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
2080       break;
2081     case Bytecodes::_f2d:
2082       if (UseSSE < 1) {
2083         /* nothing to do */
2084       } else if (UseSSE == 1) {
2085         __ push_f(xmm0);
2086         __ pop_f();
2087       } else { // UseSSE >= 2
2088         __ cvtss2sd(xmm0, xmm0);
2089       }
2090       break;
2091     case Bytecodes::_d2i:
2092       if (UseSSE >= 2) {
2093         __ push_d(xmm0);
2094       } else {
2095         __ push(rcx);          // reserve space for argument
2096         __ push(rcx);
2097         __ fstp_d(at_rsp());   // pass double argument on stack
2098       }
2099       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
2100       break;
2101     case Bytecodes::_d2l:
2102       if (UseSSE >= 2) {
2103         __ push_d(xmm0);
2104       } else {
2105         __ push(rcx);          // reserve space for argument
2106         __ push(rcx);
2107         __ fstp_d(at_rsp());   // pass double argument on stack
2108       }
2109       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
2110       break;
2111     case Bytecodes::_d2f:
2112       if (UseSSE <= 1) {
2113         __ push(rcx);          // reserve space for f2ieee()
2114         __ f2ieee();           // truncate to float size
2115         __ pop(rcx);           // adjust rsp
2116         if (UseSSE == 1) {
2117           // The cvtsd2ss instruction is not available if UseSSE==1, therefore
2118           // the conversion is performed using the FPU in this case.
2119           __ push_f();
2120           __ pop_f(xmm0);
2121         }
2122       } else { // UseSSE >= 2
2123         __ cvtsd2ss(xmm0, xmm0);
2124       }
2125       break;
2126     default             :
2127       ShouldNotReachHere();
2128   }
2129 #endif
2130 }
2131 
2132 void TemplateTable::lcmp() {
2133   transition(ltos, itos);
2134 #ifdef _LP64
2135   Label done;
2136   __ pop_l(rdx);
2137   __ cmpq(rdx, rax);
2138   __ movl(rax, -1);
2139   __ jccb(Assembler::less, done);
2140   __ setb(Assembler::notEqual, rax);
2141   __ movzbl(rax, rax);
2142   __ bind(done);
2143 #else
2144 
2145   // y = rdx:rax
2146   __ pop_l(rbx, rcx);             // get x = rcx:rbx
2147   __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
2148   __ mov(rax, rcx);
2149 #endif
2150 }
2151 
2152 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
2153   if ((is_float && UseSSE >= 1) ||
2154       (!is_float && UseSSE >= 2)) {
2155     Label done;
2156     if (is_float) {
2157       // XXX get rid of pop here, use ... reg, mem32
2158       __ pop_f(xmm1);
2159       __ ucomiss(xmm1, xmm0);
2160     } else {
2161       // XXX get rid of pop here, use ... reg, mem64
2162       __ pop_d(xmm1);
2163       __ ucomisd(xmm1, xmm0);
2164     }
2165     if (unordered_result < 0) {
2166       __ movl(rax, -1);
2167       __ jccb(Assembler::parity, done);
2168       __ jccb(Assembler::below, done);
2169       __ setb(Assembler::notEqual, rdx);
2170       __ movzbl(rax, rdx);
2171     } else {
2172       __ movl(rax, 1);
2173       __ jccb(Assembler::parity, done);
2174       __ jccb(Assembler::above, done);
2175       __ movl(rax, 0);
2176       __ jccb(Assembler::equal, done);
2177       __ decrementl(rax);
2178     }
2179     __ bind(done);
2180   } else {
2181 #ifdef _LP64
2182     ShouldNotReachHere();
2183 #else
2184     if (is_float) {
2185       __ fld_s(at_rsp());
2186     } else {
2187       __ fld_d(at_rsp());
2188       __ pop(rdx);
2189     }
2190     __ pop(rcx);
2191     __ fcmp2int(rax, unordered_result < 0);
2192 #endif // _LP64
2193   }
2194 }
2195 
2196 void TemplateTable::branch(bool is_jsr, bool is_wide) {
2197   if (ValueTypesThreadLocalRecycling) {
2198     Label no_vt_recycling, no_fixing_required;
2199     const Register thread1 = NOT_LP64(rbx) LP64_ONLY(r15_thread);
2200     NOT_LP64(__ get_thread(thread1));
2201     __ movptr(rbx, Address(thread1, in_bytes(JavaThread::vt_alloc_ptr_offset())));
2202     __ testptr(rbx, rbx);
2203     __ jcc(Assembler::zero, no_vt_recycling);
2204     __ movptr(rcx, Address(rbp, frame::interpreter_frame_vt_alloc_ptr_offset * wordSize));
2205     __ testptr(rcx, rcx);
2206     __ jcc(Assembler::notZero, no_fixing_required);
2207     // vt_alloc_ptr in JavaThread is non-null but frame vt_alloc_ptr is null
2208     // which means frame vt_alloc_ptr needs to be initialized
2209     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::fix_frame_vt_alloc_ptr));
2210     __ movptr(rcx, Address(rbp, frame::interpreter_frame_vt_alloc_ptr_offset * wordSize));
2211     __ bind(no_fixing_required);
2212     __ testptr(rcx, rbx);
2213     __ jcc(Assembler::equal, no_vt_recycling);
2214     __ andptr(rcx, VTBufferChunk::chunk_mask());
2215     __ movl(rcx, Address(rcx, VTBufferChunk::index_offset()));
2216     __ andptr(rbx, VTBufferChunk::chunk_mask());
2217     __ movl(rbx, Address(rbx, VTBufferChunk::index_offset()));
2218     __ subl(rbx, rcx);
2219     __ get_method(rcx);
2220     __ movl(rcx, Address(rcx, Method::max_vt_buffer_offset()));
2221     __ cmpl(rbx, rcx);
2222     __ jcc(Assembler::lessEqual, no_vt_recycling);
2223     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::recycle_buffered_values));
2224     __ bind(no_vt_recycling);
2225   }
2226 
2227   __ get_method(rcx); // rcx holds method
2228   __ profile_taken_branch(rax, rbx); // rax holds updated MDP, rbx
2229                                      // holds bumped taken count
2230 
2231   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
2232                              InvocationCounter::counter_offset();
2233   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
2234                               InvocationCounter::counter_offset();
2235 
2236   // Load up edx with the branch displacement
2237   if (is_wide) {
2238     __ movl(rdx, at_bcp(1));
2239   } else {
2240     __ load_signed_short(rdx, at_bcp(1));
2241   }
2242   __ bswapl(rdx);
2243 
2244   if (!is_wide) {
2245     __ sarl(rdx, 16);
2246   }
2247   LP64_ONLY(__ movl2ptr(rdx, rdx));
2248 
2249   // Handle all the JSR stuff here, then exit.
2250   // It's much shorter and cleaner than intermingling with the non-JSR
2251   // normal-branch stuff occurring below.
2252   if (is_jsr) {
2253     // Pre-load the next target bytecode into rbx
2254     __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1, 0));
2255 
2256     // compute return address as bci in rax
2257     __ lea(rax, at_bcp((is_wide ? 5 : 3) -
2258                         in_bytes(ConstMethod::codes_offset())));
2259     __ subptr(rax, Address(rcx, Method::const_offset()));
2260     // Adjust the bcp in r13 by the displacement in rdx
2261     __ addptr(rbcp, rdx);
2262     // jsr returns atos that is not an oop
2263     __ push_i(rax);
2264     __ dispatch_only(vtos, true);
2265     return;
2266   }
2267 
2268   // Normal (non-jsr) branch handling
2269 
2270   // Adjust the bcp in r13 by the displacement in rdx
2271   __ addptr(rbcp, rdx);
2272 
2273   assert(UseLoopCounter || !UseOnStackReplacement,
2274          "on-stack-replacement requires loop counters");
2275   Label backedge_counter_overflow;
2276   Label profile_method;
2277   Label dispatch;
2278   if (UseLoopCounter) {
2279     // increment backedge counter for backward branches
2280     // rax: MDO
2281     // rbx: MDO bumped taken-count
2282     // rcx: method
2283     // rdx: target offset
2284     // r13: target bcp
2285     // r14: locals pointer
2286     __ testl(rdx, rdx);             // check if forward or backward branch
2287     __ jcc(Assembler::positive, dispatch); // count only if backward branch
2288 
2289     // check if MethodCounters exists
2290     Label has_counters;
2291     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2292     __ testptr(rax, rax);
2293     __ jcc(Assembler::notZero, has_counters);
2294     __ push(rdx);
2295     __ push(rcx);
2296     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
2297                rcx);
2298     __ pop(rcx);
2299     __ pop(rdx);
2300     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
2301     __ testptr(rax, rax);
2302     __ jcc(Assembler::zero, dispatch);
2303     __ bind(has_counters);
2304 
2305     if (TieredCompilation) {
2306       Label no_mdo;
2307       int increment = InvocationCounter::count_increment;
2308       if (ProfileInterpreter) {
2309         // Are we profiling?
2310         __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
2311         __ testptr(rbx, rbx);
2312         __ jccb(Assembler::zero, no_mdo);
2313         // Increment the MDO backedge counter
2314         const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
2315                                            in_bytes(InvocationCounter::counter_offset()));
2316         const Address mask(rbx, in_bytes(MethodData::backedge_mask_offset()));
2317         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
2318                                    rax, false, Assembler::zero, &backedge_counter_overflow);
2319         __ jmp(dispatch);
2320       }
2321       __ bind(no_mdo);
2322       // Increment backedge counter in MethodCounters*
2323       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2324       const Address mask(rcx, in_bytes(MethodCounters::backedge_mask_offset()));
2325       __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
2326                                  rax, false, Assembler::zero, &backedge_counter_overflow);
2327     } else { // not TieredCompilation
2328       // increment counter
2329       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
2330       __ movl(rax, Address(rcx, be_offset));        // load backedge counter
2331       __ incrementl(rax, InvocationCounter::count_increment); // increment counter
2332       __ movl(Address(rcx, be_offset), rax);        // store counter
2333 
2334       __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
2335 
2336       __ andl(rax, InvocationCounter::count_mask_value); // and the status bits
2337       __ addl(rax, Address(rcx, be_offset));        // add both counters
2338 
2339       if (ProfileInterpreter) {
2340         // Test to see if we should create a method data oop
2341         __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_profile_limit_offset())));
2342         __ jcc(Assembler::less, dispatch);
2343 
2344         // if no method data exists, go to profile method
2345         __ test_method_data_pointer(rax, profile_method);
2346 
2347         if (UseOnStackReplacement) {
2348           // check for overflow against rbx which is the MDO taken count
2349           __ cmp32(rbx, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2350           __ jcc(Assembler::below, dispatch);
2351 
2352           // When ProfileInterpreter is on, the backedge_count comes
2353           // from the MethodData*, which value does not get reset on
2354           // the call to frequency_counter_overflow().  To avoid
2355           // excessive calls to the overflow routine while the method is
2356           // being compiled, add a second test to make sure the overflow
2357           // function is called only once every overflow_frequency.
2358           const int overflow_frequency = 1024;
2359           __ andl(rbx, overflow_frequency - 1);
2360           __ jcc(Assembler::zero, backedge_counter_overflow);
2361 
2362         }
2363       } else {
2364         if (UseOnStackReplacement) {
2365           // check for overflow against rax, which is the sum of the
2366           // counters
2367           __ cmp32(rax, Address(rcx, in_bytes(MethodCounters::interpreter_backward_branch_limit_offset())));
2368           __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
2369 
2370         }
2371       }
2372     }
2373     __ bind(dispatch);
2374   }
2375 
2376   // Pre-load the next target bytecode into rbx
2377   __ load_unsigned_byte(rbx, Address(rbcp, 0));
2378 
2379   // continue with the bytecode @ target
2380   // rax: return bci for jsr's, unused otherwise
2381   // rbx: target bytecode
2382   // r13: target bcp
2383   __ dispatch_only(vtos, true);
2384 
2385   if (UseLoopCounter) {
2386     if (ProfileInterpreter) {
2387       // Out-of-line code to allocate method data oop.
2388       __ bind(profile_method);
2389       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
2390       __ set_method_data_pointer_for_bcp();
2391       __ jmp(dispatch);
2392     }
2393 
2394     if (UseOnStackReplacement) {
2395       // invocation counter overflow
2396       __ bind(backedge_counter_overflow);
2397       __ negptr(rdx);
2398       __ addptr(rdx, rbcp); // branch bcp
2399       // IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
2400       __ call_VM(noreg,
2401                  CAST_FROM_FN_PTR(address,
2402                                   InterpreterRuntime::frequency_counter_overflow),
2403                  rdx);
2404 
2405       // rax: osr nmethod (osr ok) or NULL (osr not possible)
2406       // rdx: scratch
2407       // r14: locals pointer
2408       // r13: bcp
2409       __ testptr(rax, rax);                        // test result
2410       __ jcc(Assembler::zero, dispatch);         // no osr if null
2411       // nmethod may have been invalidated (VM may block upon call_VM return)
2412       __ cmpb(Address(rax, nmethod::state_offset()), nmethod::in_use);
2413       __ jcc(Assembler::notEqual, dispatch);
2414 
2415       // We have the address of an on stack replacement routine in rax.
2416       // In preparation of invoking it, first we must migrate the locals
2417       // and monitors from off the interpreter frame on the stack.
2418       // Ensure to save the osr nmethod over the migration call,
2419       // it will be preserved in rbx.
2420       __ mov(rbx, rax);
2421 
2422       NOT_LP64(__ get_thread(rcx));
2423 
2424       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
2425 
2426       // rax is OSR buffer, move it to expected parameter location
2427       LP64_ONLY(__ mov(j_rarg0, rax));
2428       NOT_LP64(__ mov(rcx, rax));
2429       // We use j_rarg definitions here so that registers don't conflict as parameter
2430       // registers change across platforms as we are in the midst of a calling
2431       // sequence to the OSR nmethod and we don't want collision. These are NOT parameters.
2432 
2433       const Register retaddr   = LP64_ONLY(j_rarg2) NOT_LP64(rdi);
2434       const Register sender_sp = LP64_ONLY(j_rarg1) NOT_LP64(rdx);
2435 
2436       // pop the interpreter frame
2437       __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
2438       __ leave();                                // remove frame anchor
2439       __ pop(retaddr);                           // get return address
2440       __ mov(rsp, sender_sp);                   // set sp to sender sp
2441       // Ensure compiled code always sees stack at proper alignment
2442       __ andptr(rsp, -(StackAlignmentInBytes));
2443 
2444       // unlike x86 we need no specialized return from compiled code
2445       // to the interpreter or the call stub.
2446 
2447       // push the return address
2448       __ push(retaddr);
2449 
2450       // and begin the OSR nmethod
2451       __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
2452     }
2453   }
2454 }
2455 
2456 void TemplateTable::if_0cmp(Condition cc) {
2457   transition(itos, vtos);
2458   // assume branch is more often taken than not (loops use backward branches)
2459   Label not_taken;
2460   __ testl(rax, rax);
2461   __ jcc(j_not(cc), not_taken);
2462   branch(false, false);
2463   __ bind(not_taken);
2464   __ profile_not_taken_branch(rax);
2465 }
2466 
2467 void TemplateTable::if_icmp(Condition cc) {
2468   transition(itos, vtos);
2469   // assume branch is more often taken than not (loops use backward branches)
2470   Label not_taken;
2471   __ pop_i(rdx);
2472   __ cmpl(rdx, rax);
2473   __ jcc(j_not(cc), not_taken);
2474   branch(false, false);
2475   __ bind(not_taken);
2476   __ profile_not_taken_branch(rax);
2477 }
2478 
2479 void TemplateTable::if_nullcmp(Condition cc) {
2480   transition(atos, vtos);
2481   // assume branch is more often taken than not (loops use backward branches)
2482   Label not_taken;
2483   __ testptr(rax, rax);
2484   __ jcc(j_not(cc), not_taken);
2485   branch(false, false);
2486   __ bind(not_taken);
2487   __ profile_not_taken_branch(rax);
2488 }
2489 
2490 void TemplateTable::if_acmp(Condition cc) {
2491   transition(atos, vtos);
2492   // assume branch is more often taken than not (loops use backward branches)
2493   Label not_taken;
2494   __ pop_ptr(rdx);
2495   __ cmpoop(rdx, rax);
2496   __ jcc(j_not(cc), not_taken);
2497   branch(false, false);
2498   __ bind(not_taken);
2499   __ profile_not_taken_branch(rax);
2500 }
2501 
2502 void TemplateTable::ret() {
2503   transition(vtos, vtos);
2504   locals_index(rbx);
2505   LP64_ONLY(__ movslq(rbx, iaddress(rbx))); // get return bci, compute return bcp
2506   NOT_LP64(__ movptr(rbx, iaddress(rbx)));
2507   __ profile_ret(rbx, rcx);
2508   __ get_method(rax);
2509   __ movptr(rbcp, Address(rax, Method::const_offset()));
2510   __ lea(rbcp, Address(rbcp, rbx, Address::times_1,
2511                       ConstMethod::codes_offset()));
2512   __ dispatch_next(vtos, 0, true);
2513 }
2514 
2515 void TemplateTable::wide_ret() {
2516   transition(vtos, vtos);
2517   locals_index_wide(rbx);
2518   __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
2519   __ profile_ret(rbx, rcx);
2520   __ get_method(rax);
2521   __ movptr(rbcp, Address(rax, Method::const_offset()));
2522   __ lea(rbcp, Address(rbcp, rbx, Address::times_1, ConstMethod::codes_offset()));
2523   __ dispatch_next(vtos, 0, true);
2524 }
2525 
2526 void TemplateTable::tableswitch() {
2527   Label default_case, continue_execution;
2528   transition(itos, vtos);
2529 
2530   // align r13/rsi
2531   __ lea(rbx, at_bcp(BytesPerInt));
2532   __ andptr(rbx, -BytesPerInt);
2533   // load lo & hi
2534   __ movl(rcx, Address(rbx, BytesPerInt));
2535   __ movl(rdx, Address(rbx, 2 * BytesPerInt));
2536   __ bswapl(rcx);
2537   __ bswapl(rdx);
2538   // check against lo & hi
2539   __ cmpl(rax, rcx);
2540   __ jcc(Assembler::less, default_case);
2541   __ cmpl(rax, rdx);
2542   __ jcc(Assembler::greater, default_case);
2543   // lookup dispatch offset
2544   __ subl(rax, rcx);
2545   __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
2546   __ profile_switch_case(rax, rbx, rcx);
2547   // continue execution
2548   __ bind(continue_execution);
2549   __ bswapl(rdx);
2550   LP64_ONLY(__ movl2ptr(rdx, rdx));
2551   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2552   __ addptr(rbcp, rdx);
2553   __ dispatch_only(vtos, true);
2554   // handle default
2555   __ bind(default_case);
2556   __ profile_switch_default(rax);
2557   __ movl(rdx, Address(rbx, 0));
2558   __ jmp(continue_execution);
2559 }
2560 
2561 void TemplateTable::lookupswitch() {
2562   transition(itos, itos);
2563   __ stop("lookupswitch bytecode should have been rewritten");
2564 }
2565 
2566 void TemplateTable::fast_linearswitch() {
2567   transition(itos, vtos);
2568   Label loop_entry, loop, found, continue_execution;
2569   // bswap rax so we can avoid bswapping the table entries
2570   __ bswapl(rax);
2571   // align r13
2572   __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
2573                                     // this instruction (change offsets
2574                                     // below)
2575   __ andptr(rbx, -BytesPerInt);
2576   // set counter
2577   __ movl(rcx, Address(rbx, BytesPerInt));
2578   __ bswapl(rcx);
2579   __ jmpb(loop_entry);
2580   // table search
2581   __ bind(loop);
2582   __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * BytesPerInt));
2583   __ jcc(Assembler::equal, found);
2584   __ bind(loop_entry);
2585   __ decrementl(rcx);
2586   __ jcc(Assembler::greaterEqual, loop);
2587   // default case
2588   __ profile_switch_default(rax);
2589   __ movl(rdx, Address(rbx, 0));
2590   __ jmp(continue_execution);
2591   // entry found -> get offset
2592   __ bind(found);
2593   __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * BytesPerInt));
2594   __ profile_switch_case(rcx, rax, rbx);
2595   // continue execution
2596   __ bind(continue_execution);
2597   __ bswapl(rdx);
2598   __ movl2ptr(rdx, rdx);
2599   __ load_unsigned_byte(rbx, Address(rbcp, rdx, Address::times_1));
2600   __ addptr(rbcp, rdx);
2601   __ dispatch_only(vtos, true);
2602 }
2603 
2604 void TemplateTable::fast_binaryswitch() {
2605   transition(itos, vtos);
2606   // Implementation using the following core algorithm:
2607   //
2608   // int binary_search(int key, LookupswitchPair* array, int n) {
2609   //   // Binary search according to "Methodik des Programmierens" by
2610   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
2611   //   int i = 0;
2612   //   int j = n;
2613   //   while (i+1 < j) {
2614   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
2615   //     // with      Q: for all i: 0 <= i < n: key < a[i]
2616   //     // where a stands for the array and assuming that the (inexisting)
2617   //     // element a[n] is infinitely big.
2618   //     int h = (i + j) >> 1;
2619   //     // i < h < j
2620   //     if (key < array[h].fast_match()) {
2621   //       j = h;
2622   //     } else {
2623   //       i = h;
2624   //     }
2625   //   }
2626   //   // R: a[i] <= key < a[i+1] or Q
2627   //   // (i.e., if key is within array, i is the correct index)
2628   //   return i;
2629   // }
2630 
2631   // Register allocation
2632   const Register key   = rax; // already set (tosca)
2633   const Register array = rbx;
2634   const Register i     = rcx;
2635   const Register j     = rdx;
2636   const Register h     = rdi;
2637   const Register temp  = rsi;
2638 
2639   // Find array start
2640   NOT_LP64(__ save_bcp());
2641 
2642   __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
2643                                           // get rid of this
2644                                           // instruction (change
2645                                           // offsets below)
2646   __ andptr(array, -BytesPerInt);
2647 
2648   // Initialize i & j
2649   __ xorl(i, i);                            // i = 0;
2650   __ movl(j, Address(array, -BytesPerInt)); // j = length(array);
2651 
2652   // Convert j into native byteordering
2653   __ bswapl(j);
2654 
2655   // And start
2656   Label entry;
2657   __ jmp(entry);
2658 
2659   // binary search loop
2660   {
2661     Label loop;
2662     __ bind(loop);
2663     // int h = (i + j) >> 1;
2664     __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
2665     __ sarl(h, 1);                               // h = (i + j) >> 1;
2666     // if (key < array[h].fast_match()) {
2667     //   j = h;
2668     // } else {
2669     //   i = h;
2670     // }
2671     // Convert array[h].match to native byte-ordering before compare
2672     __ movl(temp, Address(array, h, Address::times_8));
2673     __ bswapl(temp);
2674     __ cmpl(key, temp);
2675     // j = h if (key <  array[h].fast_match())
2676     __ cmov32(Assembler::less, j, h);
2677     // i = h if (key >= array[h].fast_match())
2678     __ cmov32(Assembler::greaterEqual, i, h);
2679     // while (i+1 < j)
2680     __ bind(entry);
2681     __ leal(h, Address(i, 1)); // i+1
2682     __ cmpl(h, j);             // i+1 < j
2683     __ jcc(Assembler::less, loop);
2684   }
2685 
2686   // end of binary search, result index is i (must check again!)
2687   Label default_case;
2688   // Convert array[i].match to native byte-ordering before compare
2689   __ movl(temp, Address(array, i, Address::times_8));
2690   __ bswapl(temp);
2691   __ cmpl(key, temp);
2692   __ jcc(Assembler::notEqual, default_case);
2693 
2694   // entry found -> j = offset
2695   __ movl(j , Address(array, i, Address::times_8, BytesPerInt));
2696   __ profile_switch_case(i, key, array);
2697   __ bswapl(j);
2698   LP64_ONLY(__ movslq(j, j));
2699 
2700   NOT_LP64(__ restore_bcp());
2701   NOT_LP64(__ restore_locals());                           // restore rdi
2702 
2703   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2704   __ addptr(rbcp, j);
2705   __ dispatch_only(vtos, true);
2706 
2707   // default case -> j = default offset
2708   __ bind(default_case);
2709   __ profile_switch_default(i);
2710   __ movl(j, Address(array, -2 * BytesPerInt));
2711   __ bswapl(j);
2712   LP64_ONLY(__ movslq(j, j));
2713 
2714   NOT_LP64(__ restore_bcp());
2715   NOT_LP64(__ restore_locals());
2716 
2717   __ load_unsigned_byte(rbx, Address(rbcp, j, Address::times_1));
2718   __ addptr(rbcp, j);
2719   __ dispatch_only(vtos, true);
2720 }
2721 
2722 void TemplateTable::_return(TosState state) {
2723   transition(state, state);
2724 
2725   assert(_desc->calls_vm(),
2726          "inconsistent calls_vm information"); // call in remove_activation
2727 
2728   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2729     assert(state == vtos, "only valid state");
2730     Register robj = LP64_ONLY(c_rarg1) NOT_LP64(rax);
2731     __ movptr(robj, aaddress(0));
2732     __ load_klass(rdi, robj);
2733     __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2734     __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2735     Label skip_register_finalizer;
2736     __ jcc(Assembler::zero, skip_register_finalizer);
2737 
2738     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), robj);
2739 
2740     __ bind(skip_register_finalizer);
2741   }
2742 
2743   if (SafepointMechanism::uses_thread_local_poll() && _desc->bytecode() != Bytecodes::_return_register_finalizer) {
2744     Label no_safepoint;
2745     NOT_PRODUCT(__ block_comment("Thread-local Safepoint poll"));
2746 #ifdef _LP64
2747     __ testb(Address(r15_thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
2748 #else
2749     const Register thread = rdi;
2750     __ get_thread(thread);
2751     __ testb(Address(thread, Thread::polling_page_offset()), SafepointMechanism::poll_bit());
2752 #endif
2753     __ jcc(Assembler::zero, no_safepoint);
2754     __ push(state);
2755     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
2756                                     InterpreterRuntime::at_safepoint));
2757     __ pop(state);
2758     __ bind(no_safepoint);
2759   }
2760 
2761   if (state == atos) {
2762     Label not_returning_null_vt;
2763     const Register method = rbx;
2764 
2765     __ testl(rax, rax);
2766     __ jcc(Assembler::notZero, not_returning_null_vt);
2767     __ get_method(method);
2768     __ cmpb(Address(rbx, Method::flags_offset() + in_ByteSize(1)), Method::byte_value_for_known_not_returning_vt());
2769     __ jcc(Assembler::equal, not_returning_null_vt);
2770     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::deoptimize_caller_frame_for_vt), method);
2771     __ bind(not_returning_null_vt);
2772     if (ValueTypesBufferMaxMemory > 0) {
2773       Label notBuffered;
2774 
2775       __ test_value_is_not_buffered(rax, rbx, notBuffered);
2776       const Register thread1 = NOT_LP64(rcx) LP64_ONLY(r15_thread);
2777       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::return_value), rax);
2778       NOT_LP64(__ get_thread(thread1));
2779       __ get_vm_result(rax, thread1);
2780       __ bind(notBuffered);
2781     }
2782   }
2783 
2784   // Narrow result if state is itos but result type is smaller.
2785   // Need to narrow in the return bytecode rather than in generate_return_entry
2786   // since compiled code callers expect the result to already be narrowed.
2787   if (state == itos) {
2788     __ narrow(rax);
2789   }
2790 
2791   __ remove_activation(state, rbcp, true, true, true, /*state == qtos*/ false && ValueTypeReturnedAsFields);
2792 
2793   __ jmp(rbcp);
2794 }
2795 
2796 // ----------------------------------------------------------------------------
2797 // Volatile variables demand their effects be made known to all CPU's
2798 // in order.  Store buffers on most chips allow reads & writes to
2799 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2800 // without some kind of memory barrier (i.e., it's not sufficient that
2801 // the interpreter does not reorder volatile references, the hardware
2802 // also must not reorder them).
2803 //
2804 // According to the new Java Memory Model (JMM):
2805 // (1) All volatiles are serialized wrt to each other.  ALSO reads &
2806 //     writes act as aquire & release, so:
2807 // (2) A read cannot let unrelated NON-volatile memory refs that
2808 //     happen after the read float up to before the read.  It's OK for
2809 //     non-volatile memory refs that happen before the volatile read to
2810 //     float down below it.
2811 // (3) Similar a volatile write cannot let unrelated NON-volatile
2812 //     memory refs that happen BEFORE the write float down to after the
2813 //     write.  It's OK for non-volatile memory refs that happen after the
2814 //     volatile write to float up before it.
2815 //
2816 // We only put in barriers around volatile refs (they are expensive),
2817 // not _between_ memory refs (that would require us to track the
2818 // flavor of the previous memory refs).  Requirements (2) and (3)
2819 // require some barriers before volatile stores and after volatile
2820 // loads.  These nearly cover requirement (1) but miss the
2821 // volatile-store-volatile-load case.  This final case is placed after
2822 // volatile-stores although it could just as well go before
2823 // volatile-loads.
2824 
2825 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2826   // Helper function to insert a is-volatile test and memory barrier
2827   if(!os::is_MP()) return;    // Not needed on single CPU
2828   __ membar(order_constraint);
2829 }
2830 
2831 void TemplateTable::resolve_cache_and_index(int byte_no,
2832                                             Register Rcache,
2833                                             Register index,
2834                                             size_t index_size) {
2835   const Register temp = rbx;
2836   assert_different_registers(Rcache, index, temp);
2837 
2838   Label resolved;
2839 
2840   Bytecodes::Code code = bytecode();
2841   switch (code) {
2842   case Bytecodes::_nofast_getfield: code = Bytecodes::_getfield; break;
2843   case Bytecodes::_nofast_putfield: code = Bytecodes::_putfield; break;
2844   default: break;
2845   }
2846 
2847   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2848   __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2849   __ cmpl(temp, code);  // have we resolved this bytecode?
2850   __ jcc(Assembler::equal, resolved);
2851 
2852   // resolve first time through
2853   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_from_cache);
2854   __ movl(temp, code);
2855   __ call_VM(noreg, entry, temp);
2856   // Update registers with resolved info
2857   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2858   __ bind(resolved);
2859 }
2860 
2861 // The cache and index registers must be set before call
2862 void TemplateTable::load_field_cp_cache_entry(Register obj,
2863                                               Register cache,
2864                                               Register index,
2865                                               Register off,
2866                                               Register flags,
2867                                               bool is_static = false) {
2868   assert_different_registers(cache, index, flags, off);
2869 
2870   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2871   // Field offset
2872   __ movptr(off, Address(cache, index, Address::times_ptr,
2873                          in_bytes(cp_base_offset +
2874                                   ConstantPoolCacheEntry::f2_offset())));
2875   // Flags
2876   __ movl(flags, Address(cache, index, Address::times_ptr,
2877                          in_bytes(cp_base_offset +
2878                                   ConstantPoolCacheEntry::flags_offset())));
2879 
2880   // klass overwrite register
2881   if (is_static) {
2882     __ movptr(obj, Address(cache, index, Address::times_ptr,
2883                            in_bytes(cp_base_offset +
2884                                     ConstantPoolCacheEntry::f1_offset())));
2885     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2886     __ movptr(obj, Address(obj, mirror_offset));
2887     __ resolve_oop_handle(obj);
2888   }
2889 }
2890 
2891 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2892                                                Register method,
2893                                                Register itable_index,
2894                                                Register flags,
2895                                                bool is_invokevirtual,
2896                                                bool is_invokevfinal, /*unused*/
2897                                                bool is_invokedynamic) {
2898   // setup registers
2899   const Register cache = rcx;
2900   const Register index = rdx;
2901   assert_different_registers(method, flags);
2902   assert_different_registers(method, cache, index);
2903   assert_different_registers(itable_index, flags);
2904   assert_different_registers(itable_index, cache, index);
2905   // determine constant pool cache field offsets
2906   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2907   const int method_offset = in_bytes(
2908     ConstantPoolCache::base_offset() +
2909       ((byte_no == f2_byte)
2910        ? ConstantPoolCacheEntry::f2_offset()
2911        : ConstantPoolCacheEntry::f1_offset()));
2912   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2913                                     ConstantPoolCacheEntry::flags_offset());
2914   // access constant pool cache fields
2915   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2916                                     ConstantPoolCacheEntry::f2_offset());
2917 
2918   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2919   resolve_cache_and_index(byte_no, cache, index, index_size);
2920     __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2921 
2922   if (itable_index != noreg) {
2923     // pick up itable or appendix index from f2 also:
2924     __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2925   }
2926   __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2927 }
2928 
2929 // The registers cache and index expected to be set before call.
2930 // Correct values of the cache and index registers are preserved.
2931 void TemplateTable::jvmti_post_field_access(Register cache,
2932                                             Register index,
2933                                             bool is_static,
2934                                             bool has_tos) {
2935   if (JvmtiExport::can_post_field_access()) {
2936     // Check to see if a field access watch has been set before we take
2937     // the time to call into the VM.
2938     Label L1;
2939     assert_different_registers(cache, index, rax);
2940     __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2941     __ testl(rax,rax);
2942     __ jcc(Assembler::zero, L1);
2943 
2944     // cache entry pointer
2945     __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2946     __ shll(index, LogBytesPerWord);
2947     __ addptr(cache, index);
2948     if (is_static) {
2949       __ xorptr(rax, rax);      // NULL object reference
2950     } else {
2951       __ pop(atos);         // Get the object
2952       __ verify_oop(rax);
2953       __ push(atos);        // Restore stack state
2954     }
2955     // rax,:   object pointer or NULL
2956     // cache: cache entry pointer
2957     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2958                rax, cache);
2959     __ get_cache_and_index_at_bcp(cache, index, 1);
2960     __ bind(L1);
2961   }
2962 }
2963 
2964 void TemplateTable::pop_and_check_object(Register r) {
2965   __ pop_ptr(r);
2966   __ null_check(r);  // for field access must check obj.
2967   __ verify_oop(r);
2968 }
2969 
2970 void TemplateTable::getfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
2971   transition(vtos, vtos);
2972 
2973   const Register cache = rcx;
2974   const Register index = rdx;
2975   const Register obj   = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
2976   const Register off   = rbx;
2977   const Register flags = rax;
2978   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx); // uses same reg as obj, so don't mix them
2979   const Register flags2 = rdx;
2980 
2981   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2982   jvmti_post_field_access(cache, index, is_static, false);
2983   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2984 
2985   const Address field(obj, off, Address::times_1, 0*wordSize);
2986   NOT_LP64(const Address hi(obj, off, Address::times_1, 1*wordSize));
2987 
2988   Label Done, notByte, notBool, notInt, notShort, notChar, notLong, notFloat, notObj, notValueType, notDouble;
2989 
2990   if (!is_static) {
2991     __ movptr(rcx, Address(cache, index, Address::times_ptr,
2992                            in_bytes(ConstantPoolCache::base_offset() +
2993                                     ConstantPoolCacheEntry::f1_offset())));
2994   }
2995 
2996   __ movl(flags2, flags);
2997 
2998   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2999   // Make sure we don't need to mask edx after the above shift
3000   assert(btos == 0, "change code, btos != 0");
3001 
3002   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
3003 
3004   __ jcc(Assembler::notZero, notByte);
3005   // btos
3006   if (!is_static) pop_and_check_object(obj);
3007   __ load_signed_byte(rax, field);
3008   __ push(btos);
3009   // Rewrite bytecode to be faster
3010   if (!is_static && rc == may_rewrite) {
3011     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
3012   }
3013   __ jmp(Done);
3014 
3015   __ bind(notByte);
3016 
3017   __ cmpl(flags, ztos);
3018   __ jcc(Assembler::notEqual, notBool);
3019    if (!is_static) pop_and_check_object(obj);
3020   // ztos (same code as btos)
3021   __ load_signed_byte(rax, field);
3022   __ push(ztos);
3023   // Rewrite bytecode to be faster
3024   if (!is_static && rc == may_rewrite) {
3025     // use btos rewriting, no truncating to t/f bit is needed for getfield.
3026     patch_bytecode(Bytecodes::_fast_bgetfield, bc, rbx);
3027   }
3028   __ jmp(Done);
3029 
3030   __ bind(notBool);
3031   __ cmpl(flags, atos);
3032   __ jcc(Assembler::notEqual, notObj);
3033   // atos
3034   if (!EnableValhalla) {
3035     if (!is_static) pop_and_check_object(obj);
3036     do_oop_load(_masm, field, rax);
3037     __ push(atos);
3038     if (!is_static && rc == may_rewrite) {
3039       patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
3040     }
3041     __ jmp(Done);
3042   } else {
3043     if (is_static) {
3044       __ load_heap_oop(rax, field);
3045       Label isFlattenable, uninitialized;
3046       // Issue below if the static field has not been initialized yet
3047       __ test_field_is_flattenable(flags2, rscratch1, isFlattenable);
3048         // Not flattenable case
3049         __ push(atos);
3050         __ jmp(Done);
3051       // Flattenable case, must not return null even if uninitialized
3052       __ bind(isFlattenable);
3053         __ testptr(rax, rax);
3054         __ jcc(Assembler::zero, uninitialized);
3055           __ push(atos);
3056           __ jmp(Done);
3057         __ bind(uninitialized);
3058           __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
3059           __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_static_value_field),
3060                  obj, flags2);
3061           __ verify_oop(rax);
3062           __ push(atos);
3063           __ jmp(Done);
3064     } else {
3065       Label isFlattened, nonnull, isFlattenable, rewriteFlattenable;
3066       __ test_field_is_flattenable(flags2, rscratch1, isFlattenable);
3067         // Non-flattenable field case, also covers the object case
3068         pop_and_check_object(obj);
3069         __ load_heap_oop(rax, field);
3070         __ push(atos);
3071         if (rc == may_rewrite) {
3072           patch_bytecode(Bytecodes::_fast_agetfield, bc, rbx);
3073         }
3074         __ jmp(Done);
3075       __ bind(isFlattenable);
3076         __ test_field_is_flattened(flags2, rscratch1, isFlattened);
3077           // Non-flattened field case
3078           pop_and_check_object(obj);
3079           __ load_heap_oop(rax, field);
3080           __ testptr(rax, rax);
3081           __ jcc(Assembler::notZero, nonnull);
3082             __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
3083             __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field),
3084                        obj, flags2);
3085           __ bind(nonnull);
3086           __ verify_oop(rax);
3087           __ push(atos);
3088           __ jmp(rewriteFlattenable);
3089         __ bind(isFlattened);
3090           __ andl(flags2, ConstantPoolCacheEntry::field_index_mask);
3091           pop_and_check_object(rbx);
3092           call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field),
3093                   rbx, flags2, rcx);
3094           __ verify_oop(rax);
3095           __ push(atos);
3096       __ bind(rewriteFlattenable);
3097       if (rc == may_rewrite) {
3098         patch_bytecode(Bytecodes::_fast_qgetfield, bc, rbx);
3099       }
3100       __ jmp(Done);
3101     }
3102   }
3103 
3104   __ bind(notObj);
3105 
3106   if (!is_static) pop_and_check_object(obj);
3107 
3108   __ cmpl(flags, itos);
3109   __ jcc(Assembler::notEqual, notInt);
3110   // itos
3111   __ movl(rax, field);
3112   __ push(itos);
3113   // Rewrite bytecode to be faster
3114   if (!is_static && rc == may_rewrite) {
3115     patch_bytecode(Bytecodes::_fast_igetfield, bc, rbx);
3116   }
3117   __ jmp(Done);
3118 
3119   __ bind(notInt);
3120   __ cmpl(flags, ctos);
3121   __ jcc(Assembler::notEqual, notChar);
3122   // ctos
3123   __ load_unsigned_short(rax, field);
3124   __ push(ctos);
3125   // Rewrite bytecode to be faster
3126   if (!is_static && rc == may_rewrite) {
3127     patch_bytecode(Bytecodes::_fast_cgetfield, bc, rbx);
3128   }
3129   __ jmp(Done);
3130 
3131   __ bind(notChar);
3132   __ cmpl(flags, stos);
3133   __ jcc(Assembler::notEqual, notShort);
3134   // stos
3135   __ load_signed_short(rax, field);
3136   __ push(stos);
3137   // Rewrite bytecode to be faster
3138   if (!is_static && rc == may_rewrite) {
3139     patch_bytecode(Bytecodes::_fast_sgetfield, bc, rbx);
3140   }
3141   __ jmp(Done);
3142 
3143   __ bind(notShort);
3144   __ cmpl(flags, ltos);
3145   __ jcc(Assembler::notEqual, notLong);
3146   // ltos
3147 
3148 #ifndef _LP64
3149   // Generate code as if volatile.  There just aren't enough registers to
3150   // save that information and this code is faster than the test.
3151   __ fild_d(field);                // Must load atomically
3152   __ subptr(rsp,2*wordSize);    // Make space for store
3153   __ fistp_d(Address(rsp,0));
3154   __ pop(rax);
3155   __ pop(rdx);
3156 #else
3157   __ movq(rax, field);
3158 #endif
3159 
3160   __ push(ltos);
3161   // Rewrite bytecode to be faster
3162   LP64_ONLY(if (!is_static && rc == may_rewrite) patch_bytecode(Bytecodes::_fast_lgetfield, bc, rbx));
3163   __ jmp(Done);
3164 
3165   __ bind(notLong);
3166   __ cmpl(flags, ftos);
3167   __ jcc(Assembler::notEqual, notFloat);
3168   // ftos
3169 
3170   __ load_float(field);
3171   __ push(ftos);
3172   // Rewrite bytecode to be faster
3173   if (!is_static && rc == may_rewrite) {
3174     patch_bytecode(Bytecodes::_fast_fgetfield, bc, rbx);
3175   }
3176   __ jmp(Done);
3177 
3178   __ bind(notFloat);
3179 #ifdef ASSERT
3180   __ cmpl(flags, dtos);
3181   __ jcc(Assembler::notEqual, notDouble);
3182 #endif
3183   // dtos
3184   __ load_double(field);
3185   __ push(dtos);
3186   // Rewrite bytecode to be faster
3187   if (!is_static && rc == may_rewrite) {
3188     patch_bytecode(Bytecodes::_fast_dgetfield, bc, rbx);
3189   }
3190 #ifdef ASSERT
3191   __ jmp(Done);
3192 
3193 
3194   __ bind(notDouble);
3195   __ stop("Bad state");
3196 #endif
3197 
3198   __ bind(Done);
3199   // [jk] not needed currently
3200   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadLoad |
3201   //                                              Assembler::LoadStore));
3202 }
3203 
3204 void TemplateTable::getfield(int byte_no) {
3205   getfield_or_static(byte_no, false);
3206 }
3207 
3208 void TemplateTable::nofast_getfield(int byte_no) {
3209   getfield_or_static(byte_no, false, may_not_rewrite);
3210 }
3211 
3212 void TemplateTable::getstatic(int byte_no) {
3213   getfield_or_static(byte_no, true);
3214 }
3215 
3216 void TemplateTable::withfield() {
3217   transition(vtos, atos);
3218 
3219   Register cache = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
3220   Register index = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
3221 
3222   resolve_cache_and_index(f2_byte, cache, index, sizeof(u2));
3223 
3224   call_VM(rbx, CAST_FROM_FN_PTR(address, InterpreterRuntime::withfield), cache);
3225   // new value type is returned in rbx
3226   // stack adjustement is returned in rax
3227   __ verify_oop(rbx);
3228   __ addptr(rsp, rax);
3229   __ movptr(rax, rbx);
3230 }
3231 
3232 // The registers cache and index expected to be set before call.
3233 // The function may destroy various registers, just not the cache and index registers.
3234 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
3235 
3236   const Register robj = LP64_ONLY(c_rarg2)   NOT_LP64(rax);
3237   const Register RBX  = LP64_ONLY(c_rarg1)   NOT_LP64(rbx);
3238   const Register RCX  = LP64_ONLY(c_rarg3)   NOT_LP64(rcx);
3239   const Register RDX  = LP64_ONLY(rscratch1) NOT_LP64(rdx);
3240 
3241   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3242 
3243   if (JvmtiExport::can_post_field_modification()) {
3244     // Check to see if a field modification watch has been set before
3245     // we take the time to call into the VM.
3246     Label L1;
3247     assert_different_registers(cache, index, rax);
3248     __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3249     __ testl(rax, rax);
3250     __ jcc(Assembler::zero, L1);
3251 
3252     __ get_cache_and_index_at_bcp(robj, RDX, 1);
3253 
3254 
3255     if (is_static) {
3256       // Life is simple.  Null out the object pointer.
3257       __ xorl(RBX, RBX);
3258 
3259     } else {
3260       // Life is harder. The stack holds the value on top, followed by
3261       // the object.  We don't know the size of the value, though; it
3262       // could be one or two words depending on its type. As a result,
3263       // we must find the type to determine where the object is.
3264 #ifndef _LP64
3265       Label two_word, valsize_known;
3266 #endif
3267       __ movl(RCX, Address(robj, RDX,
3268                            Address::times_ptr,
3269                            in_bytes(cp_base_offset +
3270                                      ConstantPoolCacheEntry::flags_offset())));
3271       NOT_LP64(__ mov(rbx, rsp));
3272       __ shrl(RCX, ConstantPoolCacheEntry::tos_state_shift);
3273 
3274       // Make sure we don't need to mask rcx after the above shift
3275       ConstantPoolCacheEntry::verify_tos_state_shift();
3276 #ifdef _LP64
3277       __ movptr(c_rarg1, at_tos_p1());  // initially assume a one word jvalue
3278       __ cmpl(c_rarg3, ltos);
3279       __ cmovptr(Assembler::equal,
3280                  c_rarg1, at_tos_p2()); // ltos (two word jvalue)
3281       __ cmpl(c_rarg3, dtos);
3282       __ cmovptr(Assembler::equal,
3283                  c_rarg1, at_tos_p2()); // dtos (two word jvalue)
3284 #else
3285       __ cmpl(rcx, ltos);
3286       __ jccb(Assembler::equal, two_word);
3287       __ cmpl(rcx, dtos);
3288       __ jccb(Assembler::equal, two_word);
3289       __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
3290       __ jmpb(valsize_known);
3291 
3292       __ bind(two_word);
3293       __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
3294 
3295       __ bind(valsize_known);
3296       // setup object pointer
3297       __ movptr(rbx, Address(rbx, 0));
3298 #endif
3299     }
3300     // cache entry pointer
3301     __ addptr(robj, in_bytes(cp_base_offset));
3302     __ shll(RDX, LogBytesPerWord);
3303     __ addptr(robj, RDX);
3304     // object (tos)
3305     __ mov(RCX, rsp);
3306     // c_rarg1: object pointer set up above (NULL if static)
3307     // c_rarg2: cache entry pointer
3308     // c_rarg3: jvalue object on the stack
3309     __ call_VM(noreg,
3310                CAST_FROM_FN_PTR(address,
3311                                 InterpreterRuntime::post_field_modification),
3312                RBX, robj, RCX);
3313     __ get_cache_and_index_at_bcp(cache, index, 1);
3314     __ bind(L1);
3315   }
3316 }
3317 
3318 void TemplateTable::putfield_or_static(int byte_no, bool is_static, RewriteControl rc) {
3319   transition(vtos, vtos);
3320 
3321   const Register cache = rcx;
3322   const Register index = rdx;
3323   const Register obj   = rcx;
3324   const Register off   = rbx;
3325   const Register flags = rax;
3326   const Register bc    = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3327   const Register flags2 = rdx;
3328 
3329   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
3330   jvmti_post_field_mod(cache, index, is_static);
3331   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
3332 
3333   // [jk] not needed currently
3334   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3335   //                                              Assembler::StoreStore));
3336 
3337   Label notVolatile, Done;
3338 
3339   __ movl(flags2, flags);
3340 
3341   // field addresses
3342   const Address field(obj, off, Address::times_1, 0*wordSize);
3343   NOT_LP64( const Address hi(obj, off, Address::times_1, 1*wordSize);)
3344 
3345   Label notByte, notBool, notInt, notShort, notChar,
3346         notLong, notFloat, notObj, notValueType, notDouble;
3347 
3348   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
3349 
3350   assert(btos == 0, "change code, btos != 0");
3351   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
3352   __ jcc(Assembler::notZero, notByte);
3353 
3354   // btos
3355   {
3356     __ pop(btos);
3357     if (!is_static) pop_and_check_object(obj);
3358     __ movb(field, rax);
3359     if (!is_static && rc == may_rewrite) {
3360       patch_bytecode(Bytecodes::_fast_bputfield, bc, rbx, true, byte_no);
3361     }
3362     __ jmp(Done);
3363   }
3364 
3365   __ bind(notByte);
3366   __ cmpl(flags, ztos);
3367   __ jcc(Assembler::notEqual, notBool);
3368 
3369   // ztos
3370   {
3371     __ pop(ztos);
3372     if (!is_static) pop_and_check_object(obj);
3373     __ andl(rax, 0x1);
3374     __ movb(field, rax);
3375     if (!is_static && rc == may_rewrite) {
3376       patch_bytecode(Bytecodes::_fast_zputfield, bc, rbx, true, byte_no);
3377     }
3378     __ jmp(Done);
3379   }
3380 
3381   __ bind(notBool);
3382   __ cmpl(flags, atos);
3383   __ jcc(Assembler::notEqual, notObj);
3384 
3385   // atos
3386   {
3387     if (!EnableValhalla) {
3388       __ pop(atos);
3389       if (!is_static) pop_and_check_object(obj);
3390       // Store into the field
3391       do_oop_store(_masm, field, rax);
3392       if (!is_static && rc == may_rewrite) {
3393         patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3394       }
3395       __ jmp(Done);
3396     } else {
3397       __ pop(atos);
3398       if (is_static) {
3399         Label notFlattenable, notBuffered;
3400         __ test_field_is_not_flattenable(flags2, rscratch1, notFlattenable);
3401         __ null_check(rax);
3402         __ bind(notFlattenable);
3403         if (ValueTypesBufferMaxMemory > 0) {
3404           __ test_value_is_not_buffered(rax, rscratch1, notBuffered);
3405           call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_heap_copy),
3406                   rax, off, obj);
3407           __ jmp(Done);
3408           __ bind(notBuffered);
3409         }
3410         do_oop_store(_masm, field, rax);
3411         __ jmp(Done);
3412       } else {
3413         Label isFlattenable, isFlattened, notBuffered, notBuffered2, rewriteNotFlattenable, rewriteFlattenable;
3414         __ test_field_is_flattenable(flags2, rscratch1, isFlattenable);
3415         // Not flattenable case, covers not flattenable values and objects
3416         pop_and_check_object(obj);
3417         // Store into the field
3418         if (ValueTypesBufferMaxMemory > 0) {
3419           __ test_value_is_not_buffered(rax, rscratch1, notBuffered);
3420           call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_heap_copy),
3421                   rax, off, obj);
3422           __ jmp(rewriteNotFlattenable);
3423           __ bind(notBuffered);
3424         }
3425         do_oop_store(_masm, field, rax);
3426         __ bind(rewriteNotFlattenable);
3427         if (rc == may_rewrite) {
3428           patch_bytecode(Bytecodes::_fast_aputfield, bc, rbx, true, byte_no);
3429         }
3430         __ jmp(Done);
3431         // Implementation of the flattenable semantic
3432         __ bind(isFlattenable);
3433         __ null_check(rax);
3434         __ test_field_is_flattened(flags2, rscratch1, isFlattened);
3435         // Not flattened case
3436         if (ValueTypesBufferMaxMemory > 0) {
3437           __ test_value_is_not_buffered(rax, rscratch1, notBuffered2);
3438           pop_and_check_object(obj);
3439           call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_heap_copy),
3440                   rax, off, obj);
3441           __ jmp(rewriteFlattenable);
3442           __ bind(notBuffered2);
3443         }
3444         pop_and_check_object(obj);
3445         // Store into the field
3446         do_oop_store(_masm, field, rax);
3447         __ jmp(rewriteFlattenable);
3448         __ bind(isFlattened);
3449         pop_and_check_object(obj);
3450         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value),
3451                 rax, off, obj);
3452         __ bind(rewriteFlattenable);
3453         if (rc == may_rewrite) {
3454           patch_bytecode(Bytecodes::_fast_qputfield, bc, rbx, true, byte_no);
3455         }
3456         __ jmp(Done);
3457       }
3458     }
3459   }
3460 
3461   __ bind(notObj);
3462   __ cmpl(flags, itos);
3463   __ jcc(Assembler::notEqual, notInt);
3464 
3465   // itos
3466   {
3467     __ pop(itos);
3468     if (!is_static) pop_and_check_object(obj);
3469     __ movl(field, rax);
3470     if (!is_static && rc == may_rewrite) {
3471       patch_bytecode(Bytecodes::_fast_iputfield, bc, rbx, true, byte_no);
3472     }
3473     __ jmp(Done);
3474   }
3475 
3476   __ bind(notInt);
3477   __ cmpl(flags, ctos);
3478   __ jcc(Assembler::notEqual, notChar);
3479 
3480   // ctos
3481   {
3482     __ pop(ctos);
3483     if (!is_static) pop_and_check_object(obj);
3484     __ movw(field, rax);
3485     if (!is_static && rc == may_rewrite) {
3486       patch_bytecode(Bytecodes::_fast_cputfield, bc, rbx, true, byte_no);
3487     }
3488     __ jmp(Done);
3489   }
3490 
3491   __ bind(notChar);
3492   __ cmpl(flags, stos);
3493   __ jcc(Assembler::notEqual, notShort);
3494 
3495   // stos
3496   {
3497     __ pop(stos);
3498     if (!is_static) pop_and_check_object(obj);
3499     __ movw(field, rax);
3500     if (!is_static && rc == may_rewrite) {
3501       patch_bytecode(Bytecodes::_fast_sputfield, bc, rbx, true, byte_no);
3502     }
3503     __ jmp(Done);
3504   }
3505 
3506   __ bind(notShort);
3507   __ cmpl(flags, ltos);
3508   __ jcc(Assembler::notEqual, notLong);
3509 
3510   // ltos
3511 #ifdef _LP64
3512   {
3513     __ pop(ltos);
3514     if (!is_static) pop_and_check_object(obj);
3515     __ movq(field, rax);
3516     if (!is_static && rc == may_rewrite) {
3517       patch_bytecode(Bytecodes::_fast_lputfield, bc, rbx, true, byte_no);
3518     }
3519     __ jmp(Done);
3520   }
3521 #else
3522   {
3523     Label notVolatileLong;
3524     __ testl(rdx, rdx);
3525     __ jcc(Assembler::zero, notVolatileLong);
3526 
3527     __ pop(ltos);  // overwrites rdx, do this after testing volatile.
3528     if (!is_static) pop_and_check_object(obj);
3529 
3530     // Replace with real volatile test
3531     __ push(rdx);
3532     __ push(rax);                 // Must update atomically with FIST
3533     __ fild_d(Address(rsp,0));    // So load into FPU register
3534     __ fistp_d(field);            // and put into memory atomically
3535     __ addptr(rsp, 2*wordSize);
3536     // volatile_barrier();
3537     volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3538                                                  Assembler::StoreStore));
3539     // Don't rewrite volatile version
3540     __ jmp(notVolatile);
3541 
3542     __ bind(notVolatileLong);
3543 
3544     __ pop(ltos);  // overwrites rdx
3545     if (!is_static) pop_and_check_object(obj);
3546     __ movptr(hi, rdx);
3547     __ movptr(field, rax);
3548     // Don't rewrite to _fast_lputfield for potential volatile case.
3549     __ jmp(notVolatile);
3550   }
3551 #endif // _LP64
3552 
3553   __ bind(notLong);
3554   __ cmpl(flags, ftos);
3555   __ jcc(Assembler::notEqual, notFloat);
3556 
3557   // ftos
3558   {
3559     __ pop(ftos);
3560     if (!is_static) pop_and_check_object(obj);
3561     __ store_float(field);
3562     if (!is_static && rc == may_rewrite) {
3563       patch_bytecode(Bytecodes::_fast_fputfield, bc, rbx, true, byte_no);
3564     }
3565     __ jmp(Done);
3566   }
3567 
3568   __ bind(notFloat);
3569 #ifdef ASSERT
3570   __ cmpl(flags, dtos);
3571   __ jcc(Assembler::notEqual, notDouble);
3572 #endif
3573 
3574   // dtos
3575   {
3576     __ pop(dtos);
3577     if (!is_static) pop_and_check_object(obj);
3578     __ store_double(field);
3579     if (!is_static && rc == may_rewrite) {
3580       patch_bytecode(Bytecodes::_fast_dputfield, bc, rbx, true, byte_no);
3581     }
3582   }
3583 
3584 #ifdef ASSERT
3585   __ jmp(Done);
3586 
3587   __ bind(notDouble);
3588   __ stop("Bad state");
3589 #endif
3590 
3591   __ bind(Done);
3592 
3593   __ shrl(flags2, ConstantPoolCacheEntry::is_volatile_shift);
3594   __ andl(flags2, 0x1);
3595 
3596   // Check for volatile store
3597   __ testl(flags2, flags2);
3598   __ jcc(Assembler::zero, notVolatile);
3599   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3600                                                Assembler::StoreStore));
3601   __ bind(notVolatile);
3602 }
3603 
3604 void TemplateTable::putfield(int byte_no) {
3605   putfield_or_static(byte_no, false);
3606 }
3607 
3608 void TemplateTable::nofast_putfield(int byte_no) {
3609   putfield_or_static(byte_no, false, may_not_rewrite);
3610 }
3611 
3612 void TemplateTable::putstatic(int byte_no) {
3613   putfield_or_static(byte_no, true);
3614 }
3615 
3616 void TemplateTable::jvmti_post_fast_field_mod() {
3617 
3618   const Register scratch = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
3619 
3620   if (JvmtiExport::can_post_field_modification()) {
3621     // Check to see if a field modification watch has been set before
3622     // we take the time to call into the VM.
3623     Label L2;
3624     __ mov32(scratch, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
3625     __ testl(scratch, scratch);
3626     __ jcc(Assembler::zero, L2);
3627     __ pop_ptr(rbx);                  // copy the object pointer from tos
3628     __ verify_oop(rbx);
3629     __ push_ptr(rbx);                 // put the object pointer back on tos
3630     // Save tos values before call_VM() clobbers them. Since we have
3631     // to do it for every data type, we use the saved values as the
3632     // jvalue object.
3633     switch (bytecode()) {          // load values into the jvalue object
3634     case Bytecodes::_fast_qputfield: //fall through
3635     case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
3636     case Bytecodes::_fast_bputfield: // fall through
3637     case Bytecodes::_fast_zputfield: // fall through
3638     case Bytecodes::_fast_sputfield: // fall through
3639     case Bytecodes::_fast_cputfield: // fall through
3640     case Bytecodes::_fast_iputfield: __ push_i(rax); break;
3641     case Bytecodes::_fast_dputfield: __ push(dtos); break;
3642     case Bytecodes::_fast_fputfield: __ push(ftos); break;
3643     case Bytecodes::_fast_lputfield: __ push_l(rax); break;
3644 
3645     default:
3646       ShouldNotReachHere();
3647     }
3648     __ mov(scratch, rsp);             // points to jvalue on the stack
3649     // access constant pool cache entry
3650     LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1));
3651     NOT_LP64(__ get_cache_entry_pointer_at_bcp(rax, rdx, 1));
3652     __ verify_oop(rbx);
3653     // rbx: object pointer copied above
3654     // c_rarg2: cache entry pointer
3655     // c_rarg3: jvalue object on the stack
3656     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, c_rarg2, c_rarg3));
3657     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx));
3658 
3659     switch (bytecode()) {             // restore tos values
3660     case Bytecodes::_fast_qputfield: // fall through
3661     case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
3662     case Bytecodes::_fast_bputfield: // fall through
3663     case Bytecodes::_fast_zputfield: // fall through
3664     case Bytecodes::_fast_sputfield: // fall through
3665     case Bytecodes::_fast_cputfield: // fall through
3666     case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
3667     case Bytecodes::_fast_dputfield: __ pop(dtos); break;
3668     case Bytecodes::_fast_fputfield: __ pop(ftos); break;
3669     case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
3670     default: break;
3671     }
3672     __ bind(L2);
3673   }
3674 }
3675 
3676 void TemplateTable::fast_storefield(TosState state) {
3677   transition(state, vtos);
3678 
3679   ByteSize base = ConstantPoolCache::base_offset();
3680 
3681   jvmti_post_fast_field_mod();
3682 
3683   // access constant pool cache
3684   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3685 
3686   // test for volatile with rdx but rdx is tos register for lputfield.
3687   __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3688                        in_bytes(base +
3689                                 ConstantPoolCacheEntry::flags_offset())));
3690 
3691   // replace index with field offset from cache entry
3692   __ movptr(rbx, Address(rcx, rbx, Address::times_ptr,
3693                          in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
3694 
3695   // [jk] not needed currently
3696   // volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
3697   //                                              Assembler::StoreStore));
3698 
3699   if (bytecode() == Bytecodes::_fast_qputfield) {
3700     __ movl(rscratch2, rdx);
3701   }
3702 
3703   Label notVolatile;
3704   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3705   __ andl(rdx, 0x1);
3706 
3707   // Get object from stack
3708   pop_and_check_object(rcx);
3709 
3710   // field address
3711   const Address field(rcx, rbx, Address::times_1);
3712 
3713   // access field
3714   switch (bytecode()) {
3715   case Bytecodes::_fast_qputfield:
3716     {
3717       Label isFlattened, notBuffered, done;
3718       __ null_check(rax);
3719       __ test_field_is_flattened(rscratch2, rscratch1, isFlattened);
3720       // No Flattened case
3721       if (ValueTypesBufferMaxMemory > 0) {
3722         __ test_value_is_not_buffered(rax, rscratch1, notBuffered);
3723         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_heap_copy),
3724                     rax, rbx, rcx);
3725         __ jmp(done);
3726         __ bind(notBuffered);
3727       }
3728       do_oop_store(_masm, field, rax);
3729       __ jmp(done);
3730       __ bind(isFlattened);
3731       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_flattened_value),
3732           rax, rbx, rcx);
3733       __ bind(done);
3734     }
3735     break;
3736   case Bytecodes::_fast_aputfield:
3737     {
3738       Label notBuffered, done;
3739       if (ValueTypesBufferMaxMemory > 0) {
3740         __ test_value_is_not_buffered(rax, rscratch1, notBuffered);
3741         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::write_heap_copy),
3742             rax, rbx, rcx);
3743         __ jmp(done);
3744         __ bind(notBuffered);
3745       }
3746       do_oop_store(_masm, field, rax);
3747       __ bind(done);
3748     }
3749     break;
3750   case Bytecodes::_fast_lputfield:
3751 #ifdef _LP64
3752   __ movq(field, rax);
3753 #else
3754   __ stop("should not be rewritten");
3755 #endif
3756     break;
3757   case Bytecodes::_fast_iputfield:
3758     __ movl(field, rax);
3759     break;
3760   case Bytecodes::_fast_zputfield:
3761     __ andl(rax, 0x1);  // boolean is true if LSB is 1
3762     // fall through to bputfield
3763   case Bytecodes::_fast_bputfield:
3764     __ movb(field, rax);
3765     break;
3766   case Bytecodes::_fast_sputfield:
3767     // fall through
3768   case Bytecodes::_fast_cputfield:
3769     __ movw(field, rax);
3770     break;
3771   case Bytecodes::_fast_fputfield:
3772     __ store_float(field);
3773     break;
3774   case Bytecodes::_fast_dputfield:
3775     __ store_double(field);
3776     break;
3777   default:
3778     ShouldNotReachHere();
3779   }
3780 
3781   // Check for volatile store
3782   __ testl(rdx, rdx);
3783   __ jcc(Assembler::zero, notVolatile);
3784   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
3785                                                Assembler::StoreStore));
3786   __ bind(notVolatile);
3787 }
3788 
3789 void TemplateTable::fast_accessfield(TosState state) {
3790   transition(atos, state);
3791 
3792   // Do the JVMTI work here to avoid disturbing the register state below
3793   if (JvmtiExport::can_post_field_access()) {
3794     // Check to see if a field access watch has been set before we
3795     // take the time to call into the VM.
3796     Label L1;
3797     __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
3798     __ testl(rcx, rcx);
3799     __ jcc(Assembler::zero, L1);
3800     // access constant pool cache entry
3801     LP64_ONLY(__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1));
3802     NOT_LP64(__ get_cache_entry_pointer_at_bcp(rcx, rdx, 1));
3803     __ verify_oop(rax);
3804     __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
3805     LP64_ONLY(__ mov(c_rarg1, rax));
3806     // c_rarg1: object pointer copied above
3807     // c_rarg2: cache entry pointer
3808     LP64_ONLY(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), c_rarg1, c_rarg2));
3809     NOT_LP64(__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx));
3810     __ pop_ptr(rax); // restore object pointer
3811     __ bind(L1);
3812   }
3813 
3814   // access constant pool cache
3815   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
3816   // replace index with field offset from cache entry
3817   // [jk] not needed currently
3818   // if (os::is_MP()) {
3819   //   __ movl(rdx, Address(rcx, rbx, Address::times_8,
3820   //                        in_bytes(ConstantPoolCache::base_offset() +
3821   //                                 ConstantPoolCacheEntry::flags_offset())));
3822   //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3823   //   __ andl(rdx, 0x1);
3824   // }
3825   __ movptr(rdx, Address(rcx, rbx, Address::times_ptr,
3826                          in_bytes(ConstantPoolCache::base_offset() +
3827                                   ConstantPoolCacheEntry::f2_offset())));
3828 
3829   // rax: object
3830   __ verify_oop(rax);
3831   __ null_check(rax);
3832   Address field(rax, rdx, Address::times_1);
3833 
3834   // access field
3835   switch (bytecode()) {
3836   case Bytecodes::_fast_qgetfield:
3837     {
3838       Label isFlattened, nonnull, Done;
3839       __ movptr(rscratch1, Address(rcx, rbx, Address::times_ptr,
3840                                    in_bytes(ConstantPoolCache::base_offset() +
3841                                             ConstantPoolCacheEntry::flags_offset())));
3842       __ test_field_is_flattened(rscratch1, rscratch2, isFlattened);
3843         // Non-flattened field case
3844         __ movptr(rscratch1, rax);
3845         __ load_heap_oop(rax, field);
3846         __ testptr(rax, rax);
3847         __ jcc(Assembler::notZero, nonnull);
3848           __ movptr(rax, rscratch1);
3849           __ movl(rcx, Address(rcx, rbx, Address::times_ptr,
3850                              in_bytes(ConstantPoolCache::base_offset() +
3851                                       ConstantPoolCacheEntry::flags_offset())));
3852           __ andl(rcx, ConstantPoolCacheEntry::field_index_mask);
3853           __ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::uninitialized_instance_value_field),
3854                      rax, rcx);
3855         __ bind(nonnull);
3856         __ verify_oop(rax);
3857         __ jmp(Done);
3858       __ bind(isFlattened);
3859         __ movl(rdx, Address(rcx, rbx, Address::times_ptr,
3860                            in_bytes(ConstantPoolCache::base_offset() +
3861                                     ConstantPoolCacheEntry::flags_offset())));
3862         __ andl(rdx, ConstantPoolCacheEntry::field_index_mask);
3863         __ movptr(rcx, Address(rcx, rbx, Address::times_ptr,
3864                                      in_bytes(ConstantPoolCache::base_offset() +
3865                                               ConstantPoolCacheEntry::f1_offset())));
3866         call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::read_flattened_field),
3867                 rax, rdx, rcx);
3868         __ verify_oop(rax);
3869       __ bind(Done);
3870     }
3871     break;
3872   case Bytecodes::_fast_agetfield:
3873     do_oop_load(_masm, field, rax);
3874     __ verify_oop(rax);
3875     break;
3876   case Bytecodes::_fast_lgetfield:
3877 #ifdef _LP64
3878   __ movq(rax, field);
3879 #else
3880   __ stop("should not be rewritten");
3881 #endif
3882     break;
3883   case Bytecodes::_fast_igetfield:
3884     __ movl(rax, field);
3885     break;
3886   case Bytecodes::_fast_bgetfield:
3887     __ movsbl(rax, field);
3888     break;
3889   case Bytecodes::_fast_sgetfield:
3890     __ load_signed_short(rax, field);
3891     break;
3892   case Bytecodes::_fast_cgetfield:
3893     __ load_unsigned_short(rax, field);
3894     break;
3895   case Bytecodes::_fast_fgetfield:
3896     __ load_float(field);
3897     break;
3898   case Bytecodes::_fast_dgetfield:
3899     __ load_double(field);
3900     break;
3901   default:
3902     ShouldNotReachHere();
3903   }
3904   // [jk] not needed currently
3905   // if (os::is_MP()) {
3906   //   Label notVolatile;
3907   //   __ testl(rdx, rdx);
3908   //   __ jcc(Assembler::zero, notVolatile);
3909   //   __ membar(Assembler::LoadLoad);
3910   //   __ bind(notVolatile);
3911   //};
3912 }
3913 
3914 void TemplateTable::fast_xaccess(TosState state) {
3915   transition(vtos, state);
3916 
3917   // get receiver
3918   __ movptr(rax, aaddress(0));
3919   // access constant pool cache
3920   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
3921   __ movptr(rbx,
3922             Address(rcx, rdx, Address::times_ptr,
3923                     in_bytes(ConstantPoolCache::base_offset() +
3924                              ConstantPoolCacheEntry::f2_offset())));
3925   // make sure exception is reported in correct bcp range (getfield is
3926   // next instruction)
3927   __ increment(rbcp);
3928   __ null_check(rax);
3929   const Address field = Address(rax, rbx, Address::times_1, 0*wordSize);
3930   switch (state) {
3931   case itos:
3932     __ movl(rax, field);
3933     break;
3934   case atos:
3935     do_oop_load(_masm, field, rax);
3936     __ verify_oop(rax);
3937     break;
3938   case ftos:
3939     __ load_float(field);
3940     break;
3941   default:
3942     ShouldNotReachHere();
3943   }
3944 
3945   // [jk] not needed currently
3946   // if (os::is_MP()) {
3947   //   Label notVolatile;
3948   //   __ movl(rdx, Address(rcx, rdx, Address::times_8,
3949   //                        in_bytes(ConstantPoolCache::base_offset() +
3950   //                                 ConstantPoolCacheEntry::flags_offset())));
3951   //   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
3952   //   __ testl(rdx, 0x1);
3953   //   __ jcc(Assembler::zero, notVolatile);
3954   //   __ membar(Assembler::LoadLoad);
3955   //   __ bind(notVolatile);
3956   // }
3957 
3958   __ decrement(rbcp);
3959 }
3960 
3961 //-----------------------------------------------------------------------------
3962 // Calls
3963 
3964 void TemplateTable::count_calls(Register method, Register temp) {
3965   // implemented elsewhere
3966   ShouldNotReachHere();
3967 }
3968 
3969 void TemplateTable::prepare_invoke(int byte_no,
3970                                    Register method,  // linked method (or i-klass)
3971                                    Register index,   // itable index, MethodType, etc.
3972                                    Register recv,    // if caller wants to see it
3973                                    Register flags    // if caller wants to test it
3974                                    ) {
3975   // determine flags
3976   const Bytecodes::Code code = bytecode();
3977   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3978   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3979   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3980   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3981   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3982   const bool load_receiver       = (recv  != noreg);
3983   const bool save_flags          = (flags != noreg);
3984   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3985   assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
3986   assert(flags == noreg || flags == rdx, "");
3987   assert(recv  == noreg || recv  == rcx, "");
3988 
3989   // setup registers & access constant pool cache
3990   if (recv  == noreg)  recv  = rcx;
3991   if (flags == noreg)  flags = rdx;
3992   assert_different_registers(method, index, recv, flags);
3993 
3994   // save 'interpreter return address'
3995   __ save_bcp();
3996 
3997   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
3998 
3999   // maybe push appendix to arguments (just before return address)
4000   if (is_invokedynamic || is_invokehandle) {
4001     Label L_no_push;
4002     __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
4003     __ jcc(Assembler::zero, L_no_push);
4004     // Push the appendix as a trailing parameter.
4005     // This must be done before we get the receiver,
4006     // since the parameter_size includes it.
4007     __ push(rbx);
4008     __ mov(rbx, index);
4009     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
4010     __ load_resolved_reference_at_index(index, rbx);
4011     __ pop(rbx);
4012     __ push(index);  // push appendix (MethodType, CallSite, etc.)
4013     __ bind(L_no_push);
4014   }
4015 
4016   // load receiver if needed (after appendix is pushed so parameter size is correct)
4017   // Note: no return address pushed yet
4018   if (load_receiver) {
4019     __ movl(recv, flags);
4020     __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
4021     const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
4022     const int receiver_is_at_end      = -1;  // back off one slot to get receiver
4023     Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
4024     __ movptr(recv, recv_addr);
4025     __ verify_oop(recv);
4026   }
4027 
4028   if (save_flags) {
4029     __ movl(rbcp, flags);
4030   }
4031 
4032   // compute return type
4033   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
4034   // Make sure we don't need to mask flags after the above shift
4035   ConstantPoolCacheEntry::verify_tos_state_shift();
4036   // load return address
4037   {
4038     const address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
4039     ExternalAddress table(table_addr);
4040     LP64_ONLY(__ lea(rscratch1, table));
4041     LP64_ONLY(__ movptr(flags, Address(rscratch1, flags, Address::times_ptr)));
4042     NOT_LP64(__ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr))));
4043   }
4044 
4045   // push return address
4046   __ push(flags);
4047 
4048   // Restore flags value from the constant pool cache, and restore rsi
4049   // for later null checks.  r13 is the bytecode pointer
4050   if (save_flags) {
4051     __ movl(flags, rbcp);
4052     __ restore_bcp();
4053   }
4054 }
4055 
4056 void TemplateTable::invokevirtual_helper(Register index,
4057                                          Register recv,
4058                                          Register flags) {
4059   // Uses temporary registers rax, rdx
4060   assert_different_registers(index, recv, rax, rdx);
4061   assert(index == rbx, "");
4062   assert(recv  == rcx, "");
4063 
4064   // Test for an invoke of a final method
4065   Label notFinal;
4066   __ movl(rax, flags);
4067   __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
4068   __ jcc(Assembler::zero, notFinal);
4069 
4070   const Register method = index;  // method must be rbx
4071   assert(method == rbx,
4072          "Method* must be rbx for interpreter calling convention");
4073 
4074   // do the call - the index is actually the method to call
4075   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
4076 
4077   // It's final, need a null check here!
4078   __ null_check(recv);
4079 
4080   // profile this call
4081   __ profile_final_call(rax);
4082   __ profile_arguments_type(rax, method, rbcp, true);
4083 
4084   __ jump_from_interpreted(method, rax);
4085 
4086   __ bind(notFinal);
4087 
4088   // get receiver klass
4089   __ null_check(recv, oopDesc::klass_offset_in_bytes());
4090   __ load_klass(rax, recv);
4091 
4092   // profile this call
4093   __ profile_virtual_call(rax, rlocals, rdx);
4094   // get target Method* & entry point
4095   __ lookup_virtual_method(rax, index, method);
4096   __ profile_called_method(method, rdx, rbcp);
4097 
4098   __ profile_arguments_type(rdx, method, rbcp, true);
4099   __ jump_from_interpreted(method, rdx);
4100 }
4101 
4102 void TemplateTable::invokevirtual(int byte_no) {
4103   transition(vtos, vtos);
4104   assert(byte_no == f2_byte, "use this argument");
4105   prepare_invoke(byte_no,
4106                  rbx,    // method or vtable index
4107                  noreg,  // unused itable index
4108                  rcx, rdx); // recv, flags
4109 
4110   // rbx: index
4111   // rcx: receiver
4112   // rdx: flags
4113 
4114   invokevirtual_helper(rbx, rcx, rdx);
4115 }
4116 
4117 void TemplateTable::invokespecial(int byte_no) {
4118   transition(vtos, vtos);
4119   assert(byte_no == f1_byte, "use this argument");
4120   prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
4121                  rcx);  // get receiver also for null check
4122   __ verify_oop(rcx);
4123   __ null_check(rcx);
4124   // do the call
4125   __ profile_call(rax);
4126   __ profile_arguments_type(rax, rbx, rbcp, false);
4127   __ jump_from_interpreted(rbx, rax);
4128 }
4129 
4130 void TemplateTable::invokestatic(int byte_no) {
4131   transition(vtos, vtos);
4132   assert(byte_no == f1_byte, "use this argument");
4133   prepare_invoke(byte_no, rbx);  // get f1 Method*
4134   // do the call
4135   __ profile_call(rax);
4136   __ profile_arguments_type(rax, rbx, rbcp, false);
4137   __ jump_from_interpreted(rbx, rax);
4138 }
4139 
4140 
4141 void TemplateTable::fast_invokevfinal(int byte_no) {
4142   transition(vtos, vtos);
4143   assert(byte_no == f2_byte, "use this argument");
4144   __ stop("fast_invokevfinal not used on x86");
4145 }
4146 
4147 
4148 void TemplateTable::invokeinterface(int byte_no) {
4149   transition(vtos, vtos);
4150   assert(byte_no == f1_byte, "use this argument");
4151   prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 Method*
4152                  rcx, rdx); // recv, flags
4153 
4154   // rax: reference klass (from f1)
4155   // rbx: method (from f2)
4156   // rcx: receiver
4157   // rdx: flags
4158 
4159   // Special case of invokeinterface called for virtual method of
4160   // java.lang.Object.  See cpCacheOop.cpp for details.
4161   // This code isn't produced by javac, but could be produced by
4162   // another compliant java compiler.
4163   Label notMethod;
4164   __ movl(rlocals, rdx);
4165   __ andl(rlocals, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
4166 
4167   __ jcc(Assembler::zero, notMethod);
4168 
4169   invokevirtual_helper(rbx, rcx, rdx);
4170   __ bind(notMethod);
4171 
4172   // Get receiver klass into rdx - also a null check
4173   __ restore_locals();  // restore r14
4174   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
4175   __ load_klass(rdx, rcx);
4176 
4177   Label no_such_interface, no_such_method;
4178 
4179   // Preserve method for throw_AbstractMethodErrorVerbose.
4180   __ mov(rcx, rbx);
4181   // Receiver subtype check against REFC.
4182   // Superklass in rax. Subklass in rdx. Blows rcx, rdi.
4183   __ lookup_interface_method(// inputs: rec. class, interface, itable index
4184                              rdx, rax, noreg,
4185                              // outputs: scan temp. reg, scan temp. reg
4186                              rbcp, rlocals,
4187                              no_such_interface,
4188                              /*return_method=*/false);
4189 
4190   // profile this call
4191   __ restore_bcp(); // rbcp was destroyed by receiver type check
4192   __ profile_virtual_call(rdx, rbcp, rlocals);
4193 
4194   // Get declaring interface class from method, and itable index
4195   __ movptr(rax, Address(rbx, Method::const_offset()));
4196   __ movptr(rax, Address(rax, ConstMethod::constants_offset()));
4197   __ movptr(rax, Address(rax, ConstantPool::pool_holder_offset_in_bytes()));
4198   __ movl(rbx, Address(rbx, Method::itable_index_offset()));
4199   __ subl(rbx, Method::itable_index_max);
4200   __ negl(rbx);
4201 
4202   // Preserve recvKlass for throw_AbstractMethodErrorVerbose.
4203   __ mov(rlocals, rdx);
4204   __ lookup_interface_method(// inputs: rec. class, interface, itable index
4205                              rlocals, rax, rbx,
4206                              // outputs: method, scan temp. reg
4207                              rbx, rbcp,
4208                              no_such_interface);
4209 
4210   // rbx: Method* to call
4211   // rcx: receiver
4212   // Check for abstract method error
4213   // Note: This should be done more efficiently via a throw_abstract_method_error
4214   //       interpreter entry point and a conditional jump to it in case of a null
4215   //       method.
4216   __ testptr(rbx, rbx);
4217   __ jcc(Assembler::zero, no_such_method);
4218 
4219   __ profile_called_method(rbx, rbcp, rdx);
4220   __ profile_arguments_type(rdx, rbx, rbcp, true);
4221 
4222   // do the call
4223   // rcx: receiver
4224   // rbx,: Method*
4225   __ jump_from_interpreted(rbx, rdx);
4226   __ should_not_reach_here();
4227 
4228   // exception handling code follows...
4229   // note: must restore interpreter registers to canonical
4230   //       state for exception handling to work correctly!
4231 
4232   __ bind(no_such_method);
4233   // throw exception
4234   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
4235   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
4236   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
4237   // Pass arguments for generating a verbose error message.
4238 #ifdef _LP64
4239   Register recvKlass = c_rarg1;
4240   Register method    = c_rarg2;
4241   if (recvKlass != rdx) { __ movq(recvKlass, rdx); }
4242   if (method != rcx)    { __ movq(method, rcx);    }
4243 #else
4244   Register recvKlass = rdx;
4245   Register method    = rcx;
4246 #endif
4247   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodErrorVerbose),
4248              recvKlass, method);
4249   // The call_VM checks for exception, so we should never return here.
4250   __ should_not_reach_here();
4251 
4252   __ bind(no_such_interface);
4253   // throw exception
4254   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
4255   __ restore_bcp();      // rbcp must be correct for exception handler   (was destroyed)
4256   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
4257   // Pass arguments for generating a verbose error message.
4258   LP64_ONLY( if (recvKlass != rdx) { __ movq(recvKlass, rdx); } )
4259   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeErrorVerbose),
4260              recvKlass, rax);
4261   // the call_VM checks for exception, so we should never return here.
4262   __ should_not_reach_here();
4263 }
4264 
4265 void TemplateTable::invokehandle(int byte_no) {
4266   transition(vtos, vtos);
4267   assert(byte_no == f1_byte, "use this argument");
4268   const Register rbx_method = rbx;
4269   const Register rax_mtype  = rax;
4270   const Register rcx_recv   = rcx;
4271   const Register rdx_flags  = rdx;
4272 
4273   prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
4274   __ verify_method_ptr(rbx_method);
4275   __ verify_oop(rcx_recv);
4276   __ null_check(rcx_recv);
4277 
4278   // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
4279   // rbx: MH.invokeExact_MT method (from f2)
4280 
4281   // Note:  rax_mtype is already pushed (if necessary) by prepare_invoke
4282 
4283   // FIXME: profile the LambdaForm also
4284   __ profile_final_call(rax);
4285   __ profile_arguments_type(rdx, rbx_method, rbcp, true);
4286 
4287   __ jump_from_interpreted(rbx_method, rdx);
4288 }
4289 
4290 void TemplateTable::invokedynamic(int byte_no) {
4291   transition(vtos, vtos);
4292   assert(byte_no == f1_byte, "use this argument");
4293 
4294   const Register rbx_method   = rbx;
4295   const Register rax_callsite = rax;
4296 
4297   prepare_invoke(byte_no, rbx_method, rax_callsite);
4298 
4299   // rax: CallSite object (from cpool->resolved_references[f1])
4300   // rbx: MH.linkToCallSite method (from f2)
4301 
4302   // Note:  rax_callsite is already pushed by prepare_invoke
4303 
4304   // %%% should make a type profile for any invokedynamic that takes a ref argument
4305   // profile this call
4306   __ profile_call(rbcp);
4307   __ profile_arguments_type(rdx, rbx_method, rbcp, false);
4308 
4309   __ verify_oop(rax_callsite);
4310 
4311   __ jump_from_interpreted(rbx_method, rdx);
4312 }
4313 
4314 //-----------------------------------------------------------------------------
4315 // Allocation
4316 
4317 void TemplateTable::_new() {
4318   transition(vtos, atos);
4319   __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
4320   Label slow_case;
4321   Label slow_case_no_pop;
4322   Label done;
4323   Label initialize_header;
4324   Label initialize_object;  // including clearing the fields
4325 
4326   __ get_cpool_and_tags(rcx, rax);
4327 
4328   // Make sure the class we're about to instantiate has been resolved.
4329   // This is done before loading InstanceKlass to be consistent with the order
4330   // how Constant Pool is updated (see ConstantPool::klass_at_put)
4331   const int tags_offset = Array<u1>::base_offset_in_bytes();
4332   __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
4333   __ jcc(Assembler::notEqual, slow_case_no_pop);
4334 
4335   // get InstanceKlass
4336   __ load_resolved_klass_at_index(rcx, rdx, rcx);
4337   __ push(rcx);  // save the contexts of klass for initializing the header
4338 
4339   // make sure klass is initialized & doesn't have finalizer
4340   // make sure klass is fully initialized
4341   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
4342   __ jcc(Assembler::notEqual, slow_case);
4343 
4344   // get instance_size in InstanceKlass (scaled to a count of bytes)
4345   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
4346   // test to see if it has a finalizer or is malformed in some way
4347   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
4348   __ jcc(Assembler::notZero, slow_case);
4349 
4350   // Allocate the instance:
4351   //  If TLAB is enabled:
4352   //    Try to allocate in the TLAB.
4353   //    If fails, go to the slow path.
4354   //  Else If inline contiguous allocations are enabled:
4355   //    Try to allocate in eden.
4356   //    If fails due to heap end, go to slow path.
4357   //
4358   //  If TLAB is enabled OR inline contiguous is enabled:
4359   //    Initialize the allocation.
4360   //    Exit.
4361   //
4362   //  Go to slow path.
4363 
4364   const bool allow_shared_alloc =
4365     Universe::heap()->supports_inline_contig_alloc();
4366 
4367   const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rcx);
4368 #ifndef _LP64
4369   if (UseTLAB || allow_shared_alloc) {
4370     __ get_thread(thread);
4371   }
4372 #endif // _LP64
4373 
4374   if (UseTLAB) {
4375     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
4376     __ lea(rbx, Address(rax, rdx, Address::times_1));
4377     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
4378     __ jcc(Assembler::above, slow_case);
4379     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
4380     if (ZeroTLAB) {
4381       // the fields have been already cleared
4382       __ jmp(initialize_header);
4383     } else {
4384       // initialize both the header and fields
4385       __ jmp(initialize_object);
4386     }
4387   } else {
4388     // Allocation in the shared Eden, if allowed.
4389     //
4390     // rdx: instance size in bytes
4391     if (allow_shared_alloc) {
4392       ExternalAddress heap_top((address)Universe::heap()->top_addr());
4393       ExternalAddress heap_end((address)Universe::heap()->end_addr());
4394 
4395       Label retry;
4396       __ bind(retry);
4397       __ movptr(rax, heap_top);
4398       __ lea(rbx, Address(rax, rdx, Address::times_1));
4399       __ cmpptr(rbx, heap_end);
4400       __ jcc(Assembler::above, slow_case);
4401 
4402       // Compare rax, with the top addr, and if still equal, store the new
4403       // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
4404       // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
4405       //
4406       // rax,: object begin
4407       // rbx,: object end
4408       // rdx: instance size in bytes
4409       __ locked_cmpxchgptr(rbx, heap_top);
4410 
4411       // if someone beat us on the allocation, try again, otherwise continue
4412       __ jcc(Assembler::notEqual, retry);
4413 
4414       __ incr_allocated_bytes(thread, rdx, 0);
4415     }
4416   }
4417 
4418   // If UseTLAB or allow_shared_alloc are true, the object is created above and
4419   // there is an initialize need. Otherwise, skip and go to the slow path.
4420   if (UseTLAB || allow_shared_alloc) {
4421     // The object is initialized before the header.  If the object size is
4422     // zero, go directly to the header initialization.
4423     __ bind(initialize_object);
4424     __ decrement(rdx, sizeof(oopDesc));
4425     __ jcc(Assembler::zero, initialize_header);
4426 
4427     // Initialize topmost object field, divide rdx by 8, check if odd and
4428     // test if zero.
4429     __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
4430     __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
4431 
4432     // rdx must have been multiple of 8
4433 #ifdef ASSERT
4434     // make sure rdx was multiple of 8
4435     Label L;
4436     // Ignore partial flag stall after shrl() since it is debug VM
4437     __ jccb(Assembler::carryClear, L);
4438     __ stop("object size is not multiple of 2 - adjust this code");
4439     __ bind(L);
4440     // rdx must be > 0, no extra check needed here
4441 #endif
4442 
4443     // initialize remaining object fields: rdx was a multiple of 8
4444     { Label loop;
4445     __ bind(loop);
4446     __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
4447     NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
4448     __ decrement(rdx);
4449     __ jcc(Assembler::notZero, loop);
4450     }
4451 
4452     // initialize object header only.
4453     __ bind(initialize_header);
4454     if (UseBiasedLocking) {
4455       __ pop(rcx);   // get saved klass back in the register.
4456       __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
4457       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
4458     } else {
4459       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
4460                 (intptr_t)markOopDesc::prototype()); // header
4461       __ pop(rcx);   // get saved klass back in the register.
4462     }
4463 #ifdef _LP64
4464     __ xorl(rsi, rsi); // use zero reg to clear memory (shorter code)
4465     __ store_klass_gap(rax, rsi);  // zero klass gap for compressed oops
4466 #endif
4467     __ store_klass(rax, rcx);  // klass
4468 
4469     {
4470       SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
4471       // Trigger dtrace event for fastpath
4472       __ push(atos);
4473       __ call_VM_leaf(
4474            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
4475       __ pop(atos);
4476     }
4477 
4478     __ jmp(done);
4479   }
4480 
4481   // slow case
4482   __ bind(slow_case);
4483   __ pop(rcx);   // restore stack pointer to what it was when we came in.
4484   __ bind(slow_case_no_pop);
4485 
4486   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4487   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4488 
4489   __ get_constant_pool(rarg1);
4490   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4491   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rarg1, rarg2);
4492    __ verify_oop(rax);
4493 
4494   // continue
4495   __ bind(done);
4496 }
4497 
4498 void TemplateTable::defaultvalue() {
4499   transition(vtos, atos);
4500 
4501   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4502   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4503 
4504   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4505   __ get_constant_pool(rarg1);
4506 
4507   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::defaultvalue),
4508       rarg1, rarg2);
4509   __ verify_oop(rax);
4510 }
4511 
4512 void TemplateTable::newarray() {
4513   transition(itos, atos);
4514   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4515   __ load_unsigned_byte(rarg1, at_bcp(1));
4516   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray),
4517           rarg1, rax);
4518 }
4519 
4520 void TemplateTable::anewarray() {
4521   transition(itos, atos);
4522 
4523   Register rarg1 = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4524   Register rarg2 = LP64_ONLY(c_rarg2) NOT_LP64(rdx);
4525 
4526   __ get_unsigned_2_byte_index_at_bcp(rarg2, 1);
4527   __ get_constant_pool(rarg1);
4528   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray),
4529           rarg1, rarg2, rax);
4530 }
4531 
4532 void TemplateTable::arraylength() {
4533   transition(atos, itos);
4534   __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
4535   __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
4536 }
4537 
4538 void TemplateTable::checkcast() {
4539   transition(atos, atos);
4540   Label done, is_null, ok_is_subtype, quicked, resolved;
4541   __ testptr(rax, rax); // object is in rax
4542   __ jcc(Assembler::zero, is_null);
4543 
4544   // Get cpool & tags index
4545   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4546   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4547   // See if bytecode has already been quicked
4548   __ cmpb(Address(rdx, rbx,
4549                   Address::times_1,
4550                   Array<u1>::base_offset_in_bytes()),
4551           JVM_CONSTANT_Class);
4552   __ jcc(Assembler::equal, quicked);
4553   __ push(atos); // save receiver for result, and for GC
4554   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4555 
4556   // vm_result_2 has metadata result
4557 #ifndef _LP64
4558   // borrow rdi from locals
4559   __ get_thread(rdi);
4560   __ get_vm_result_2(rax, rdi);
4561   __ restore_locals();
4562 #else
4563   __ get_vm_result_2(rax, r15_thread);
4564 #endif
4565 
4566   __ pop_ptr(rdx); // restore receiver
4567   __ jmpb(resolved);
4568 
4569   // Get superklass in rax and subklass in rbx
4570   __ bind(quicked);
4571   __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
4572   __ load_resolved_klass_at_index(rcx, rbx, rax);
4573 
4574   __ bind(resolved);
4575   __ load_klass(rbx, rdx);
4576 
4577   // Generate subtype check.  Blows rcx, rdi.  Object in rdx.
4578   // Superklass in rax.  Subklass in rbx.
4579   __ gen_subtype_check(rbx, ok_is_subtype);
4580 
4581   // Come here on failure
4582   __ push_ptr(rdx);
4583   // object is at TOS
4584   __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
4585 
4586   // Come here on success
4587   __ bind(ok_is_subtype);
4588   __ mov(rax, rdx); // Restore object in rdx
4589 
4590   // Collect counts on whether this check-cast sees NULLs a lot or not.
4591   if (ProfileInterpreter) {
4592     __ jmp(done);
4593     __ bind(is_null);
4594     __ profile_null_seen(rcx);
4595   } else {
4596     __ bind(is_null);   // same as 'done'
4597   }
4598   __ bind(done);
4599 }
4600 
4601 void TemplateTable::instanceof() {
4602   transition(atos, itos);
4603   Label done, is_null, ok_is_subtype, quicked, resolved;
4604   __ testptr(rax, rax);
4605   __ jcc(Assembler::zero, is_null);
4606 
4607   // Get cpool & tags index
4608   __ get_cpool_and_tags(rcx, rdx); // rcx=cpool, rdx=tags array
4609   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // rbx=index
4610   // See if bytecode has already been quicked
4611   __ cmpb(Address(rdx, rbx,
4612                   Address::times_1,
4613                   Array<u1>::base_offset_in_bytes()),
4614           JVM_CONSTANT_Class);
4615   __ jcc(Assembler::equal, quicked);
4616 
4617   __ push(atos); // save receiver for result, and for GC
4618   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
4619   // vm_result_2 has metadata result
4620 
4621 #ifndef _LP64
4622   // borrow rdi from locals
4623   __ get_thread(rdi);
4624   __ get_vm_result_2(rax, rdi);
4625   __ restore_locals();
4626 #else
4627   __ get_vm_result_2(rax, r15_thread);
4628 #endif
4629 
4630   __ pop_ptr(rdx); // restore receiver
4631   __ verify_oop(rdx);
4632   __ load_klass(rdx, rdx);
4633   __ jmpb(resolved);
4634 
4635   // Get superklass in rax and subklass in rdx
4636   __ bind(quicked);
4637   __ load_klass(rdx, rax);
4638   __ load_resolved_klass_at_index(rcx, rbx, rax);
4639 
4640   __ bind(resolved);
4641 
4642   // Generate subtype check.  Blows rcx, rdi
4643   // Superklass in rax.  Subklass in rdx.
4644   __ gen_subtype_check(rdx, ok_is_subtype);
4645 
4646   // Come here on failure
4647   __ xorl(rax, rax);
4648   __ jmpb(done);
4649   // Come here on success
4650   __ bind(ok_is_subtype);
4651   __ movl(rax, 1);
4652 
4653   // Collect counts on whether this test sees NULLs a lot or not.
4654   if (ProfileInterpreter) {
4655     __ jmp(done);
4656     __ bind(is_null);
4657     __ profile_null_seen(rcx);
4658   } else {
4659     __ bind(is_null);   // same as 'done'
4660   }
4661   __ bind(done);
4662   // rax = 0: obj == NULL or  obj is not an instanceof the specified klass
4663   // rax = 1: obj != NULL and obj is     an instanceof the specified klass
4664 }
4665 
4666 //----------------------------------------------------------------------------------------------------
4667 // Breakpoints
4668 void TemplateTable::_breakpoint() {
4669   // Note: We get here even if we are single stepping..
4670   // jbug insists on setting breakpoints at every bytecode
4671   // even if we are in single step mode.
4672 
4673   transition(vtos, vtos);
4674 
4675   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rcx);
4676 
4677   // get the unpatched byte code
4678   __ get_method(rarg);
4679   __ call_VM(noreg,
4680              CAST_FROM_FN_PTR(address,
4681                               InterpreterRuntime::get_original_bytecode_at),
4682              rarg, rbcp);
4683   __ mov(rbx, rax);  // why?
4684 
4685   // post the breakpoint event
4686   __ get_method(rarg);
4687   __ call_VM(noreg,
4688              CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint),
4689              rarg, rbcp);
4690 
4691   // complete the execution of original bytecode
4692   __ dispatch_only_normal(vtos);
4693 }
4694 
4695 //-----------------------------------------------------------------------------
4696 // Exceptions
4697 
4698 void TemplateTable::athrow() {
4699   transition(atos, vtos);
4700   __ null_check(rax);
4701   __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
4702 }
4703 
4704 //-----------------------------------------------------------------------------
4705 // Synchronization
4706 //
4707 // Note: monitorenter & exit are symmetric routines; which is reflected
4708 //       in the assembly code structure as well
4709 //
4710 // Stack layout:
4711 //
4712 // [expressions  ] <--- rsp               = expression stack top
4713 // ..
4714 // [expressions  ]
4715 // [monitor entry] <--- monitor block top = expression stack bot
4716 // ..
4717 // [monitor entry]
4718 // [frame data   ] <--- monitor block bot
4719 // ...
4720 // [saved rbp    ] <--- rbp
4721 void TemplateTable::monitorenter() {
4722   transition(atos, vtos);
4723 
4724   // check for NULL object
4725   __ null_check(rax);
4726 
4727   const Address monitor_block_top(
4728         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4729   const Address monitor_block_bot(
4730         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4731   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4732 
4733   Label allocated;
4734 
4735   Register rtop = LP64_ONLY(c_rarg3) NOT_LP64(rcx);
4736   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4737   Register rmon = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4738 
4739   // initialize entry pointer
4740   __ xorl(rmon, rmon); // points to free slot or NULL
4741 
4742   // find a free slot in the monitor block (result in rmon)
4743   {
4744     Label entry, loop, exit;
4745     __ movptr(rtop, monitor_block_top); // points to current entry,
4746                                         // starting with top-most entry
4747     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4748                                         // of monitor block
4749     __ jmpb(entry);
4750 
4751     __ bind(loop);
4752     // check if current entry is used
4753     __ cmpptr(Address(rtop, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
4754     // if not used then remember entry in rmon
4755     __ cmovptr(Assembler::equal, rmon, rtop);   // cmov => cmovptr
4756     // check if current entry is for same object
4757     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4758     // if same object then stop searching
4759     __ jccb(Assembler::equal, exit);
4760     // otherwise advance to next entry
4761     __ addptr(rtop, entry_size);
4762     __ bind(entry);
4763     // check if bottom reached
4764     __ cmpptr(rtop, rbot);
4765     // if not at bottom then check this entry
4766     __ jcc(Assembler::notEqual, loop);
4767     __ bind(exit);
4768   }
4769 
4770   __ testptr(rmon, rmon); // check if a slot has been found
4771   __ jcc(Assembler::notZero, allocated); // if found, continue with that one
4772 
4773   // allocate one if there's no free slot
4774   {
4775     Label entry, loop;
4776     // 1. compute new pointers          // rsp: old expression stack top
4777     __ movptr(rmon, monitor_block_bot); // rmon: old expression stack bottom
4778     __ subptr(rsp, entry_size);         // move expression stack top
4779     __ subptr(rmon, entry_size);        // move expression stack bottom
4780     __ mov(rtop, rsp);                  // set start value for copy loop
4781     __ movptr(monitor_block_bot, rmon); // set new monitor block bottom
4782     __ jmp(entry);
4783     // 2. move expression stack contents
4784     __ bind(loop);
4785     __ movptr(rbot, Address(rtop, entry_size)); // load expression stack
4786                                                 // word from old location
4787     __ movptr(Address(rtop, 0), rbot);          // and store it at new location
4788     __ addptr(rtop, wordSize);                  // advance to next word
4789     __ bind(entry);
4790     __ cmpptr(rtop, rmon);                      // check if bottom reached
4791     __ jcc(Assembler::notEqual, loop);          // if not at bottom then
4792                                                 // copy next word
4793   }
4794 
4795   // call run-time routine
4796   // rmon: points to monitor entry
4797   __ bind(allocated);
4798 
4799   // Increment bcp to point to the next bytecode, so exception
4800   // handling for async. exceptions work correctly.
4801   // The object has already been poped from the stack, so the
4802   // expression stack looks correct.
4803   __ increment(rbcp);
4804 
4805   // store object
4806   __ movptr(Address(rmon, BasicObjectLock::obj_offset_in_bytes()), rax);
4807   __ lock_object(rmon);
4808 
4809   // check to make sure this monitor doesn't cause stack overflow after locking
4810   __ save_bcp();  // in case of exception
4811   __ generate_stack_overflow_check(0);
4812 
4813   // The bcp has already been incremented. Just need to dispatch to
4814   // next instruction.
4815   __ dispatch_next(vtos);
4816 }
4817 
4818 void TemplateTable::monitorexit() {
4819   transition(atos, vtos);
4820 
4821   // check for NULL object
4822   __ null_check(rax);
4823 
4824   const Address monitor_block_top(
4825         rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
4826   const Address monitor_block_bot(
4827         rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
4828   const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
4829 
4830   Register rtop = LP64_ONLY(c_rarg1) NOT_LP64(rdx);
4831   Register rbot = LP64_ONLY(c_rarg2) NOT_LP64(rbx);
4832 
4833   Label found;
4834 
4835   // find matching slot
4836   {
4837     Label entry, loop;
4838     __ movptr(rtop, monitor_block_top); // points to current entry,
4839                                         // starting with top-most entry
4840     __ lea(rbot, monitor_block_bot);    // points to word before bottom
4841                                         // of monitor block
4842     __ jmpb(entry);
4843 
4844     __ bind(loop);
4845     // check if current entry is for same object
4846     __ cmpptr(rax, Address(rtop, BasicObjectLock::obj_offset_in_bytes()));
4847     // if same object then stop searching
4848     __ jcc(Assembler::equal, found);
4849     // otherwise advance to next entry
4850     __ addptr(rtop, entry_size);
4851     __ bind(entry);
4852     // check if bottom reached
4853     __ cmpptr(rtop, rbot);
4854     // if not at bottom then check this entry
4855     __ jcc(Assembler::notEqual, loop);
4856   }
4857 
4858   // error handling. Unlocking was not block-structured
4859   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
4860                    InterpreterRuntime::throw_illegal_monitor_state_exception));
4861   __ should_not_reach_here();
4862 
4863   // call run-time routine
4864   __ bind(found);
4865   __ push_ptr(rax); // make sure object is on stack (contract with oopMaps)
4866   __ unlock_object(rtop);
4867   __ pop_ptr(rax); // discard object
4868 }
4869 
4870 // Wide instructions
4871 void TemplateTable::wide() {
4872   transition(vtos, vtos);
4873   __ load_unsigned_byte(rbx, at_bcp(1));
4874   ExternalAddress wtable((address)Interpreter::_wentry_point);
4875   __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
4876   // Note: the rbcp increment step is part of the individual wide bytecode implementations
4877 }
4878 
4879 // Multi arrays
4880 void TemplateTable::multianewarray() {
4881   transition(vtos, atos);
4882 
4883   Register rarg = LP64_ONLY(c_rarg1) NOT_LP64(rax);
4884   __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
4885   // last dim is on top of stack; we want address of first one:
4886   // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize
4887   // the latter wordSize to point to the beginning of the array.
4888   __ lea(rarg, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
4889   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rarg);
4890   __ load_unsigned_byte(rbx, at_bcp(3));
4891   __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
4892 }