1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "asm/macroAssembler.hpp"
  27 #include "interpreter/interpreter.hpp"
  28 #include "interpreter/interpreterRuntime.hpp"
  29 #include "interpreter/templateTable.hpp"
  30 #include "memory/universe.inline.hpp"
  31 #include "oops/methodData.hpp"
  32 #include "oops/objArrayKlass.hpp"
  33 #include "oops/oop.inline.hpp"
  34 #include "prims/methodHandles.hpp"
  35 #include "runtime/sharedRuntime.hpp"
  36 #include "runtime/stubRoutines.hpp"
  37 #include "runtime/synchronizer.hpp"
  38 #include "utilities/macros.hpp"
  39 
  40 #ifndef CC_INTERP
  41 #define __ _masm->
  42 
  43 //----------------------------------------------------------------------------------------------------
  44 // Platform-dependent initialization
  45 
  46 void TemplateTable::pd_initialize() {
  47   // No i486 specific initialization
  48 }
  49 
  50 //----------------------------------------------------------------------------------------------------
  51 // Address computation
  52 
  53 // local variables
  54 static inline Address iaddress(int n)            {
  55   return Address(rdi, Interpreter::local_offset_in_bytes(n));
  56 }
  57 
  58 static inline Address laddress(int n)            { return iaddress(n + 1); }
  59 static inline Address haddress(int n)            { return iaddress(n + 0); }
  60 static inline Address faddress(int n)            { return iaddress(n); }
  61 static inline Address daddress(int n)            { return laddress(n); }
  62 static inline Address aaddress(int n)            { return iaddress(n); }
  63 
  64 static inline Address iaddress(Register r)       {
  65   return Address(rdi, r, Interpreter::stackElementScale());
  66 }
  67 static inline Address laddress(Register r)       {
  68   return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(1));
  69 }
  70 static inline Address haddress(Register r)       {
  71   return Address(rdi, r, Interpreter::stackElementScale(), Interpreter::local_offset_in_bytes(0));
  72 }
  73 
  74 static inline Address faddress(Register r)       { return iaddress(r); }
  75 static inline Address daddress(Register r)       { return laddress(r); }
  76 static inline Address aaddress(Register r)       { return iaddress(r); }
  77 
  78 // expression stack
  79 // (Note: Must not use symmetric equivalents at_rsp_m1/2 since they store
  80 // data beyond the rsp which is potentially unsafe in an MT environment;
  81 // an interrupt may overwrite that data.)
  82 static inline Address at_rsp   () {
  83   return Address(rsp, 0);
  84 }
  85 
  86 // At top of Java expression stack which may be different than rsp().  It
  87 // isn't for category 1 objects.
  88 static inline Address at_tos   () {
  89   Address tos = Address(rsp,  Interpreter::expr_offset_in_bytes(0));
  90   return tos;
  91 }
  92 
  93 static inline Address at_tos_p1() {
  94   return Address(rsp,  Interpreter::expr_offset_in_bytes(1));
  95 }
  96 
  97 static inline Address at_tos_p2() {
  98   return Address(rsp,  Interpreter::expr_offset_in_bytes(2));
  99 }
 100 
 101 // Condition conversion
 102 static Assembler::Condition j_not(TemplateTable::Condition cc) {
 103   switch (cc) {
 104     case TemplateTable::equal        : return Assembler::notEqual;
 105     case TemplateTable::not_equal    : return Assembler::equal;
 106     case TemplateTable::less         : return Assembler::greaterEqual;
 107     case TemplateTable::less_equal   : return Assembler::greater;
 108     case TemplateTable::greater      : return Assembler::lessEqual;
 109     case TemplateTable::greater_equal: return Assembler::less;
 110   }
 111   ShouldNotReachHere();
 112   return Assembler::zero;
 113 }
 114 
 115 
 116 //----------------------------------------------------------------------------------------------------
 117 // Miscelaneous helper routines
 118 
 119 // Store an oop (or NULL) at the address described by obj.
 120 // If val == noreg this means store a NULL
 121 
 122 static void do_oop_store(InterpreterMacroAssembler* _masm,
 123                          Address obj,
 124                          Register val,
 125                          BarrierSet::Name barrier,
 126                          bool precise) {
 127   assert(val == noreg || val == rax, "parameter is just for looks");
 128   switch (barrier) {
 129 #if INCLUDE_ALL_GCS
 130     case BarrierSet::G1SATBCT:
 131     case BarrierSet::G1SATBCTLogging:
 132       {
 133         // flatten object address if needed
 134         // We do it regardless of precise because we need the registers
 135         if (obj.index() == noreg && obj.disp() == 0) {
 136           if (obj.base() != rdx) {
 137             __ movl(rdx, obj.base());
 138           }
 139         } else {
 140           __ leal(rdx, obj);
 141         }
 142         __ get_thread(rcx);
 143         __ save_bcp();
 144         __ g1_write_barrier_pre(rdx /* obj */,
 145                                 rbx /* pre_val */,
 146                                 rcx /* thread */,
 147                                 rsi /* tmp */,
 148                                 val != noreg /* tosca_live */,
 149                                 false /* expand_call */);
 150 
 151         // Do the actual store
 152         // noreg means NULL
 153         if (val == noreg) {
 154           __ movptr(Address(rdx, 0), NULL_WORD);
 155           // No post barrier for NULL
 156         } else {
 157           __ movl(Address(rdx, 0), val);
 158           __ g1_write_barrier_post(rdx /* store_adr */,
 159                                    val /* new_val */,
 160                                    rcx /* thread */,
 161                                    rbx /* tmp */,
 162                                    rsi /* tmp2 */);
 163         }
 164         __ restore_bcp();
 165 
 166       }
 167       break;
 168 #endif // INCLUDE_ALL_GCS
 169     case BarrierSet::CardTableModRef:
 170     case BarrierSet::CardTableExtension:
 171       {
 172         if (val == noreg) {
 173           __ movptr(obj, NULL_WORD);
 174         } else {
 175           __ movl(obj, val);
 176           // flatten object address if needed
 177           if (!precise || (obj.index() == noreg && obj.disp() == 0)) {
 178             __ store_check(obj.base());
 179           } else {
 180             __ leal(rdx, obj);
 181             __ store_check(rdx);
 182           }
 183         }
 184       }
 185       break;
 186     case BarrierSet::ModRef:
 187     case BarrierSet::Other:
 188       if (val == noreg) {
 189         __ movptr(obj, NULL_WORD);
 190       } else {
 191         __ movl(obj, val);
 192       }
 193       break;
 194     default      :
 195       ShouldNotReachHere();
 196 
 197   }
 198 }
 199 
 200 Address TemplateTable::at_bcp(int offset) {
 201   assert(_desc->uses_bcp(), "inconsistent uses_bcp information");
 202   return Address(rsi, offset);
 203 }
 204 
 205 
 206 void TemplateTable::patch_bytecode(Bytecodes::Code bc, Register bc_reg,
 207                                    Register temp_reg, bool load_bc_into_bc_reg/*=true*/,
 208                                    int byte_no) {
 209   if (!RewriteBytecodes)  return;
 210   Label L_patch_done;
 211 
 212   switch (bc) {
 213   case Bytecodes::_fast_aputfield:
 214   case Bytecodes::_fast_bputfield:
 215   case Bytecodes::_fast_cputfield:
 216   case Bytecodes::_fast_dputfield:
 217   case Bytecodes::_fast_fputfield:
 218   case Bytecodes::_fast_iputfield:
 219   case Bytecodes::_fast_lputfield:
 220   case Bytecodes::_fast_sputfield:
 221     {
 222       // We skip bytecode quickening for putfield instructions when
 223       // the put_code written to the constant pool cache is zero.
 224       // This is required so that every execution of this instruction
 225       // calls out to InterpreterRuntime::resolve_get_put to do
 226       // additional, required work.
 227       assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
 228       assert(load_bc_into_bc_reg, "we use bc_reg as temp");
 229       __ get_cache_and_index_and_bytecode_at_bcp(bc_reg, temp_reg, temp_reg, byte_no, 1);
 230       __ movl(bc_reg, bc);
 231       __ cmpl(temp_reg, (int) 0);
 232       __ jcc(Assembler::zero, L_patch_done);  // don't patch
 233     }
 234     break;
 235   default:
 236     assert(byte_no == -1, "sanity");
 237     // the pair bytecodes have already done the load.
 238     if (load_bc_into_bc_reg) {
 239       __ movl(bc_reg, bc);
 240     }
 241   }
 242 
 243   if (JvmtiExport::can_post_breakpoint()) {
 244     Label L_fast_patch;
 245     // if a breakpoint is present we can't rewrite the stream directly
 246     __ movzbl(temp_reg, at_bcp(0));
 247     __ cmpl(temp_reg, Bytecodes::_breakpoint);
 248     __ jcc(Assembler::notEqual, L_fast_patch);
 249     __ get_method(temp_reg);
 250     // Let breakpoint table handling rewrite to quicker bytecode
 251     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), temp_reg, rsi, bc_reg);
 252 #ifndef ASSERT
 253     __ jmpb(L_patch_done);
 254 #else
 255     __ jmp(L_patch_done);
 256 #endif
 257     __ bind(L_fast_patch);
 258   }
 259 
 260 #ifdef ASSERT
 261   Label L_okay;
 262   __ load_unsigned_byte(temp_reg, at_bcp(0));
 263   __ cmpl(temp_reg, (int)Bytecodes::java_code(bc));
 264   __ jccb(Assembler::equal, L_okay);
 265   __ cmpl(temp_reg, bc_reg);
 266   __ jcc(Assembler::equal, L_okay);
 267   __ stop("patching the wrong bytecode");
 268   __ bind(L_okay);
 269 #endif
 270 
 271   // patch bytecode
 272   __ movb(at_bcp(0), bc_reg);
 273   __ bind(L_patch_done);
 274 }
 275 
 276 //----------------------------------------------------------------------------------------------------
 277 // Individual instructions
 278 
 279 void TemplateTable::nop() {
 280   transition(vtos, vtos);
 281   // nothing to do
 282 }
 283 
 284 void TemplateTable::shouldnotreachhere() {
 285   transition(vtos, vtos);
 286   __ stop("shouldnotreachhere bytecode");
 287 }
 288 
 289 
 290 
 291 void TemplateTable::aconst_null() {
 292   transition(vtos, atos);
 293   __ xorptr(rax, rax);
 294 }
 295 
 296 
 297 void TemplateTable::iconst(int value) {
 298   transition(vtos, itos);
 299   if (value == 0) {
 300     __ xorptr(rax, rax);
 301   } else {
 302     __ movptr(rax, value);
 303   }
 304 }
 305 
 306 
 307 void TemplateTable::lconst(int value) {
 308   transition(vtos, ltos);
 309   if (value == 0) {
 310     __ xorptr(rax, rax);
 311   } else {
 312     __ movptr(rax, value);
 313   }
 314   assert(value >= 0, "check this code");
 315   __ xorptr(rdx, rdx);
 316 }
 317 
 318 
 319 void TemplateTable::fconst(int value) {
 320   transition(vtos, ftos);
 321          if (value == 0) { __ fldz();
 322   } else if (value == 1) { __ fld1();
 323   } else if (value == 2) { __ fld1(); __ fld1(); __ faddp(); // should do a better solution here
 324   } else                 { ShouldNotReachHere();
 325   }
 326 }
 327 
 328 
 329 void TemplateTable::dconst(int value) {
 330   transition(vtos, dtos);
 331          if (value == 0) { __ fldz();
 332   } else if (value == 1) { __ fld1();
 333   } else                 { ShouldNotReachHere();
 334   }
 335 }
 336 
 337 
 338 void TemplateTable::bipush() {
 339   transition(vtos, itos);
 340   __ load_signed_byte(rax, at_bcp(1));
 341 }
 342 
 343 
 344 void TemplateTable::sipush() {
 345   transition(vtos, itos);
 346   __ load_unsigned_short(rax, at_bcp(1));
 347   __ bswapl(rax);
 348   __ sarl(rax, 16);
 349 }
 350 
 351 void TemplateTable::ldc(bool wide) {
 352   transition(vtos, vtos);
 353   Label call_ldc, notFloat, notClass, Done;
 354 
 355   if (wide) {
 356     __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 357   } else {
 358     __ load_unsigned_byte(rbx, at_bcp(1));
 359   }
 360   __ get_cpool_and_tags(rcx, rax);
 361   const int base_offset = ConstantPool::header_size() * wordSize;
 362   const int tags_offset = Array<u1>::base_offset_in_bytes();
 363 
 364   // get type
 365   __ xorptr(rdx, rdx);
 366   __ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));
 367 
 368   // unresolved class - get the resolved class
 369   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClass);
 370   __ jccb(Assembler::equal, call_ldc);
 371 
 372   // unresolved class in error (resolution failed) - call into runtime
 373   // so that the same error from first resolution attempt is thrown.
 374   __ cmpl(rdx, JVM_CONSTANT_UnresolvedClassInError);
 375   __ jccb(Assembler::equal, call_ldc);
 376 
 377   // resolved class - need to call vm to get java mirror of the class
 378   __ cmpl(rdx, JVM_CONSTANT_Class);
 379   __ jcc(Assembler::notEqual, notClass);
 380 
 381   __ bind(call_ldc);
 382   __ movl(rcx, wide);
 383   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), rcx);
 384   __ push(atos);
 385   __ jmp(Done);
 386 
 387   __ bind(notClass);
 388   __ cmpl(rdx, JVM_CONSTANT_Float);
 389   __ jccb(Assembler::notEqual, notFloat);
 390   // ftos
 391   __ fld_s(    Address(rcx, rbx, Address::times_ptr, base_offset));
 392   __ push(ftos);
 393   __ jmp(Done);
 394 
 395   __ bind(notFloat);
 396 #ifdef ASSERT
 397   { Label L;
 398     __ cmpl(rdx, JVM_CONSTANT_Integer);
 399     __ jcc(Assembler::equal, L);
 400     // String and Object are rewritten to fast_aldc
 401     __ stop("unexpected tag type in ldc");
 402     __ bind(L);
 403   }
 404 #endif
 405   // itos JVM_CONSTANT_Integer only
 406   __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
 407   __ push(itos);
 408   __ bind(Done);
 409 }
 410 
 411 // Fast path for caching oop constants.
 412 void TemplateTable::fast_aldc(bool wide) {
 413   transition(vtos, atos);
 414 
 415   Register result = rax;
 416   Register tmp = rdx;
 417   int index_size = wide ? sizeof(u2) : sizeof(u1);
 418 
 419   Label resolved;
 420 
 421   // We are resolved if the resolved reference cache entry contains a
 422   // non-null object (String, MethodType, etc.)
 423   assert_different_registers(result, tmp);
 424   __ get_cache_index_at_bcp(tmp, 1, index_size);
 425   __ load_resolved_reference_at_index(result, tmp);
 426   __ testl(result, result);
 427   __ jcc(Assembler::notZero, resolved);
 428 
 429   address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);
 430 
 431   // first time invocation - must resolve first
 432   __ movl(tmp, (int)bytecode());
 433   __ call_VM(result, entry, tmp);
 434 
 435   __ bind(resolved);
 436 
 437   if (VerifyOops) {
 438     __ verify_oop(result);
 439   }
 440 }
 441 
 442 void TemplateTable::ldc2_w() {
 443   transition(vtos, vtos);
 444   Label Long, Done;
 445   __ get_unsigned_2_byte_index_at_bcp(rbx, 1);
 446 
 447   __ get_cpool_and_tags(rcx, rax);
 448   const int base_offset = ConstantPool::header_size() * wordSize;
 449   const int tags_offset = Array<u1>::base_offset_in_bytes();
 450 
 451   // get type
 452   __ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
 453   __ jccb(Assembler::notEqual, Long);
 454   // dtos
 455   __ fld_d(    Address(rcx, rbx, Address::times_ptr, base_offset));
 456   __ push(dtos);
 457   __ jmpb(Done);
 458 
 459   __ bind(Long);
 460   // ltos
 461   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
 462   NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
 463 
 464   __ push(ltos);
 465 
 466   __ bind(Done);
 467 }
 468 
 469 
 470 void TemplateTable::locals_index(Register reg, int offset) {
 471   __ load_unsigned_byte(reg, at_bcp(offset));
 472   __ negptr(reg);
 473 }
 474 
 475 
 476 void TemplateTable::iload() {
 477   transition(vtos, itos);
 478   if (RewriteFrequentPairs) {
 479     Label rewrite, done;
 480 
 481     // get next byte
 482     __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_iload)));
 483     // if _iload, wait to rewrite to iload2.  We only want to rewrite the
 484     // last two iloads in a pair.  Comparing against fast_iload means that
 485     // the next bytecode is neither an iload or a caload, and therefore
 486     // an iload pair.
 487     __ cmpl(rbx, Bytecodes::_iload);
 488     __ jcc(Assembler::equal, done);
 489 
 490     __ cmpl(rbx, Bytecodes::_fast_iload);
 491     __ movl(rcx, Bytecodes::_fast_iload2);
 492     __ jccb(Assembler::equal, rewrite);
 493 
 494     // if _caload, rewrite to fast_icaload
 495     __ cmpl(rbx, Bytecodes::_caload);
 496     __ movl(rcx, Bytecodes::_fast_icaload);
 497     __ jccb(Assembler::equal, rewrite);
 498 
 499     // rewrite so iload doesn't check again.
 500     __ movl(rcx, Bytecodes::_fast_iload);
 501 
 502     // rewrite
 503     // rcx: fast bytecode
 504     __ bind(rewrite);
 505     patch_bytecode(Bytecodes::_iload, rcx, rbx, false);
 506     __ bind(done);
 507   }
 508 
 509   // Get the local value into tos
 510   locals_index(rbx);
 511   __ movl(rax, iaddress(rbx));
 512 }
 513 
 514 
 515 void TemplateTable::fast_iload2() {
 516   transition(vtos, itos);
 517   locals_index(rbx);
 518   __ movl(rax, iaddress(rbx));
 519   __ push(itos);
 520   locals_index(rbx, 3);
 521   __ movl(rax, iaddress(rbx));
 522 }
 523 
 524 void TemplateTable::fast_iload() {
 525   transition(vtos, itos);
 526   locals_index(rbx);
 527   __ movl(rax, iaddress(rbx));
 528 }
 529 
 530 
 531 void TemplateTable::lload() {
 532   transition(vtos, ltos);
 533   locals_index(rbx);
 534   __ movptr(rax, laddress(rbx));
 535   NOT_LP64(__ movl(rdx, haddress(rbx)));
 536 }
 537 
 538 
 539 void TemplateTable::fload() {
 540   transition(vtos, ftos);
 541   locals_index(rbx);
 542   __ fld_s(faddress(rbx));
 543 }
 544 
 545 
 546 void TemplateTable::dload() {
 547   transition(vtos, dtos);
 548   locals_index(rbx);
 549   __ fld_d(daddress(rbx));
 550 }
 551 
 552 
 553 void TemplateTable::aload() {
 554   transition(vtos, atos);
 555   locals_index(rbx);
 556   __ movptr(rax, aaddress(rbx));
 557 }
 558 
 559 
 560 void TemplateTable::locals_index_wide(Register reg) {
 561   __ load_unsigned_short(reg, at_bcp(2));
 562   __ bswapl(reg);
 563   __ shrl(reg, 16);
 564   __ negptr(reg);
 565 }
 566 
 567 
 568 void TemplateTable::wide_iload() {
 569   transition(vtos, itos);
 570   locals_index_wide(rbx);
 571   __ movl(rax, iaddress(rbx));
 572 }
 573 
 574 
 575 void TemplateTable::wide_lload() {
 576   transition(vtos, ltos);
 577   locals_index_wide(rbx);
 578   __ movptr(rax, laddress(rbx));
 579   NOT_LP64(__ movl(rdx, haddress(rbx)));
 580 }
 581 
 582 
 583 void TemplateTable::wide_fload() {
 584   transition(vtos, ftos);
 585   locals_index_wide(rbx);
 586   __ fld_s(faddress(rbx));
 587 }
 588 
 589 
 590 void TemplateTable::wide_dload() {
 591   transition(vtos, dtos);
 592   locals_index_wide(rbx);
 593   __ fld_d(daddress(rbx));
 594 }
 595 
 596 
 597 void TemplateTable::wide_aload() {
 598   transition(vtos, atos);
 599   locals_index_wide(rbx);
 600   __ movptr(rax, aaddress(rbx));
 601 }
 602 
 603 void TemplateTable::index_check(Register array, Register index) {
 604   // Pop ptr into array
 605   __ pop_ptr(array);
 606   index_check_without_pop(array, index);
 607 }
 608 
 609 void TemplateTable::index_check_without_pop(Register array, Register index) {
 610   // destroys rbx,
 611   // check array
 612   __ null_check(array, arrayOopDesc::length_offset_in_bytes());
 613   LP64_ONLY(__ movslq(index, index));
 614   // check index
 615   __ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
 616   if (index != rbx) {
 617     // ??? convention: move aberrant index into rbx, for exception message
 618     assert(rbx != array, "different registers");
 619     __ mov(rbx, index);
 620   }
 621   __ jump_cc(Assembler::aboveEqual,
 622              ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
 623 }
 624 
 625 
 626 void TemplateTable::iaload() {
 627   transition(itos, itos);
 628   // rdx: array
 629   index_check(rdx, rax);  // kills rbx,
 630   // rax,: index
 631   __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)));
 632 }
 633 
 634 
 635 void TemplateTable::laload() {
 636   transition(itos, ltos);
 637   // rax,: index
 638   // rdx: array
 639   index_check(rdx, rax);
 640   __ mov(rbx, rax);
 641   // rbx,: index
 642   __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
 643   NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
 644 }
 645 
 646 
 647 void TemplateTable::faload() {
 648   transition(itos, ftos);
 649   // rdx: array
 650   index_check(rdx, rax);  // kills rbx,
 651   // rax,: index
 652   __ fld_s(Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
 653 }
 654 
 655 
 656 void TemplateTable::daload() {
 657   transition(itos, dtos);
 658   // rdx: array
 659   index_check(rdx, rax);  // kills rbx,
 660   // rax,: index
 661   __ fld_d(Address(rdx, rax, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
 662 }
 663 
 664 
 665 void TemplateTable::aaload() {
 666   transition(itos, atos);
 667   // rdx: array
 668   index_check(rdx, rax);  // kills rbx,
 669   // rax,: index
 670   __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
 671 }
 672 
 673 
 674 void TemplateTable::baload() {
 675   transition(itos, itos);
 676   // rdx: array
 677   index_check(rdx, rax);  // kills rbx,
 678   // rax,: index
 679   // can do better code for P5 - fix this at some point
 680   __ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
 681   __ mov(rax, rbx);
 682 }
 683 
 684 
 685 void TemplateTable::caload() {
 686   transition(itos, itos);
 687   // rdx: array
 688   index_check(rdx, rax);  // kills rbx,
 689   // rax,: index
 690   // can do better code for P5 - may want to improve this at some point
 691   __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
 692   __ mov(rax, rbx);
 693 }
 694 
 695 // iload followed by caload frequent pair
 696 void TemplateTable::fast_icaload() {
 697   transition(vtos, itos);
 698   // load index out of locals
 699   locals_index(rbx);
 700   __ movl(rax, iaddress(rbx));
 701 
 702   // rdx: array
 703   index_check(rdx, rax);
 704   // rax,: index
 705   __ load_unsigned_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
 706   __ mov(rax, rbx);
 707 }
 708 
 709 void TemplateTable::saload() {
 710   transition(itos, itos);
 711   // rdx: array
 712   index_check(rdx, rax);  // kills rbx,
 713   // rax,: index
 714   // can do better code for P5 - may want to improve this at some point
 715   __ load_signed_short(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
 716   __ mov(rax, rbx);
 717 }
 718 
 719 
 720 void TemplateTable::iload(int n) {
 721   transition(vtos, itos);
 722   __ movl(rax, iaddress(n));
 723 }
 724 
 725 
 726 void TemplateTable::lload(int n) {
 727   transition(vtos, ltos);
 728   __ movptr(rax, laddress(n));
 729   NOT_LP64(__ movptr(rdx, haddress(n)));
 730 }
 731 
 732 
 733 void TemplateTable::fload(int n) {
 734   transition(vtos, ftos);
 735   __ fld_s(faddress(n));
 736 }
 737 
 738 
 739 void TemplateTable::dload(int n) {
 740   transition(vtos, dtos);
 741   __ fld_d(daddress(n));
 742 }
 743 
 744 
 745 void TemplateTable::aload(int n) {
 746   transition(vtos, atos);
 747   __ movptr(rax, aaddress(n));
 748 }
 749 
 750 
 751 void TemplateTable::aload_0() {
 752   transition(vtos, atos);
 753   // According to bytecode histograms, the pairs:
 754   //
 755   // _aload_0, _fast_igetfield
 756   // _aload_0, _fast_agetfield
 757   // _aload_0, _fast_fgetfield
 758   //
 759   // occur frequently. If RewriteFrequentPairs is set, the (slow) _aload_0
 760   // bytecode checks if the next bytecode is either _fast_igetfield,
 761   // _fast_agetfield or _fast_fgetfield and then rewrites the
 762   // current bytecode into a pair bytecode; otherwise it rewrites the current
 763   // bytecode into _fast_aload_0 that doesn't do the pair check anymore.
 764   //
 765   // Note: If the next bytecode is _getfield, the rewrite must be delayed,
 766   //       otherwise we may miss an opportunity for a pair.
 767   //
 768   // Also rewrite frequent pairs
 769   //   aload_0, aload_1
 770   //   aload_0, iload_1
 771   // These bytecodes with a small amount of code are most profitable to rewrite
 772   if (RewriteFrequentPairs) {
 773     Label rewrite, done;
 774     // get next byte
 775     __ load_unsigned_byte(rbx, at_bcp(Bytecodes::length_for(Bytecodes::_aload_0)));
 776 
 777     // do actual aload_0
 778     aload(0);
 779 
 780     // if _getfield then wait with rewrite
 781     __ cmpl(rbx, Bytecodes::_getfield);
 782     __ jcc(Assembler::equal, done);
 783 
 784     // if _igetfield then reqrite to _fast_iaccess_0
 785     assert(Bytecodes::java_code(Bytecodes::_fast_iaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 786     __ cmpl(rbx, Bytecodes::_fast_igetfield);
 787     __ movl(rcx, Bytecodes::_fast_iaccess_0);
 788     __ jccb(Assembler::equal, rewrite);
 789 
 790     // if _agetfield then reqrite to _fast_aaccess_0
 791     assert(Bytecodes::java_code(Bytecodes::_fast_aaccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 792     __ cmpl(rbx, Bytecodes::_fast_agetfield);
 793     __ movl(rcx, Bytecodes::_fast_aaccess_0);
 794     __ jccb(Assembler::equal, rewrite);
 795 
 796     // if _fgetfield then reqrite to _fast_faccess_0
 797     assert(Bytecodes::java_code(Bytecodes::_fast_faccess_0) == Bytecodes::_aload_0, "fix bytecode definition");
 798     __ cmpl(rbx, Bytecodes::_fast_fgetfield);
 799     __ movl(rcx, Bytecodes::_fast_faccess_0);
 800     __ jccb(Assembler::equal, rewrite);
 801 
 802     // else rewrite to _fast_aload0
 803     assert(Bytecodes::java_code(Bytecodes::_fast_aload_0) == Bytecodes::_aload_0, "fix bytecode definition");
 804     __ movl(rcx, Bytecodes::_fast_aload_0);
 805 
 806     // rewrite
 807     // rcx: fast bytecode
 808     __ bind(rewrite);
 809     patch_bytecode(Bytecodes::_aload_0, rcx, rbx, false);
 810 
 811     __ bind(done);
 812   } else {
 813     aload(0);
 814   }
 815 }
 816 
 817 void TemplateTable::istore() {
 818   transition(itos, vtos);
 819   locals_index(rbx);
 820   __ movl(iaddress(rbx), rax);
 821 }
 822 
 823 
 824 void TemplateTable::lstore() {
 825   transition(ltos, vtos);
 826   locals_index(rbx);
 827   __ movptr(laddress(rbx), rax);
 828   NOT_LP64(__ movptr(haddress(rbx), rdx));
 829 }
 830 
 831 
 832 void TemplateTable::fstore() {
 833   transition(ftos, vtos);
 834   locals_index(rbx);
 835   __ fstp_s(faddress(rbx));
 836 }
 837 
 838 
 839 void TemplateTable::dstore() {
 840   transition(dtos, vtos);
 841   locals_index(rbx);
 842   __ fstp_d(daddress(rbx));
 843 }
 844 
 845 
 846 void TemplateTable::astore() {
 847   transition(vtos, vtos);
 848   __ pop_ptr(rax);
 849   locals_index(rbx);
 850   __ movptr(aaddress(rbx), rax);
 851 }
 852 
 853 
 854 void TemplateTable::wide_istore() {
 855   transition(vtos, vtos);
 856   __ pop_i(rax);
 857   locals_index_wide(rbx);
 858   __ movl(iaddress(rbx), rax);
 859 }
 860 
 861 
 862 void TemplateTable::wide_lstore() {
 863   transition(vtos, vtos);
 864   __ pop_l(rax, rdx);
 865   locals_index_wide(rbx);
 866   __ movptr(laddress(rbx), rax);
 867   NOT_LP64(__ movl(haddress(rbx), rdx));
 868 }
 869 
 870 
 871 void TemplateTable::wide_fstore() {
 872   wide_istore();
 873 }
 874 
 875 
 876 void TemplateTable::wide_dstore() {
 877   wide_lstore();
 878 }
 879 
 880 
 881 void TemplateTable::wide_astore() {
 882   transition(vtos, vtos);
 883   __ pop_ptr(rax);
 884   locals_index_wide(rbx);
 885   __ movptr(aaddress(rbx), rax);
 886 }
 887 
 888 
 889 void TemplateTable::iastore() {
 890   transition(itos, vtos);
 891   __ pop_i(rbx);
 892   // rax,: value
 893   // rdx: array
 894   index_check(rdx, rbx);  // prefer index in rbx,
 895   // rbx,: index
 896   __ movl(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_INT)), rax);
 897 }
 898 
 899 
 900 void TemplateTable::lastore() {
 901   transition(ltos, vtos);
 902   __ pop_i(rbx);
 903   // rax,: low(value)
 904   // rcx: array
 905   // rdx: high(value)
 906   index_check(rcx, rbx);  // prefer index in rbx,
 907   // rbx,: index
 908   __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
 909   NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
 910 }
 911 
 912 
 913 void TemplateTable::fastore() {
 914   transition(ftos, vtos);
 915   __ pop_i(rbx);
 916   // rdx: array
 917   // st0: value
 918   index_check(rdx, rbx);  // prefer index in rbx,
 919   // rbx,: index
 920   __ fstp_s(Address(rdx, rbx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_FLOAT)));
 921 }
 922 
 923 
 924 void TemplateTable::dastore() {
 925   transition(dtos, vtos);
 926   __ pop_i(rbx);
 927   // rdx: array
 928   // st0: value
 929   index_check(rdx, rbx);  // prefer index in rbx,
 930   // rbx,: index
 931   __ fstp_d(Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_DOUBLE)));
 932 }
 933 
 934 
 935 void TemplateTable::aastore() {
 936   Label is_null, ok_is_subtype, done;
 937   transition(vtos, vtos);
 938   // stack: ..., array, index, value
 939   __ movptr(rax, at_tos());     // Value
 940   __ movl(rcx, at_tos_p1());  // Index
 941   __ movptr(rdx, at_tos_p2());  // Array
 942 
 943   Address element_address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
 944   index_check_without_pop(rdx, rcx);      // kills rbx,
 945   // do array store check - check for NULL value first
 946   __ testptr(rax, rax);
 947   __ jcc(Assembler::zero, is_null);
 948 
 949   // Move subklass into EBX
 950   __ load_klass(rbx, rax);
 951   // Move superklass into EAX
 952   __ load_klass(rax, rdx);
 953   __ movptr(rax, Address(rax, ObjArrayKlass::element_klass_offset()));
 954   // Compress array+index*wordSize+12 into a single register.  Frees ECX.
 955   __ lea(rdx, element_address);
 956 
 957   // Generate subtype check.  Blows ECX.  Resets EDI to locals.
 958   // Superklass in EAX.  Subklass in EBX.
 959   __ gen_subtype_check( rbx, ok_is_subtype );
 960 
 961   // Come here on failure
 962   // object is at TOS
 963   __ jump(ExternalAddress(Interpreter::_throw_ArrayStoreException_entry));
 964 
 965   // Come here on success
 966   __ bind(ok_is_subtype);
 967 
 968   // Get the value to store
 969   __ movptr(rax, at_rsp());
 970   // and store it with appropriate barrier
 971   do_oop_store(_masm, Address(rdx, 0), rax, _bs->kind(), true);
 972 
 973   __ jmp(done);
 974 
 975   // Have a NULL in EAX, EDX=array, ECX=index.  Store NULL at ary[idx]
 976   __ bind(is_null);
 977   __ profile_null_seen(rbx);
 978 
 979   // Store NULL, (noreg means NULL to do_oop_store)
 980   do_oop_store(_masm, element_address, noreg, _bs->kind(), true);
 981 
 982   // Pop stack arguments
 983   __ bind(done);
 984   __ addptr(rsp, 3 * Interpreter::stackElementSize);
 985 }
 986 
 987 
 988 void TemplateTable::bastore() {
 989   transition(itos, vtos);
 990   __ pop_i(rbx);
 991   // rax,: value
 992   // rdx: array
 993   index_check(rdx, rbx);  // prefer index in rbx,
 994   // rbx,: index
 995   __ movb(Address(rdx, rbx, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)), rax);
 996 }
 997 
 998 
 999 void TemplateTable::castore() {
1000   transition(itos, vtos);
1001   __ pop_i(rbx);
1002   // rax,: value
1003   // rdx: array
1004   index_check(rdx, rbx);  // prefer index in rbx,
1005   // rbx,: index
1006   __ movw(Address(rdx, rbx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)), rax);
1007 }
1008 
1009 
1010 void TemplateTable::sastore() {
1011   castore();
1012 }
1013 
1014 
1015 void TemplateTable::istore(int n) {
1016   transition(itos, vtos);
1017   __ movl(iaddress(n), rax);
1018 }
1019 
1020 
1021 void TemplateTable::lstore(int n) {
1022   transition(ltos, vtos);
1023   __ movptr(laddress(n), rax);
1024   NOT_LP64(__ movptr(haddress(n), rdx));
1025 }
1026 
1027 
1028 void TemplateTable::fstore(int n) {
1029   transition(ftos, vtos);
1030   __ fstp_s(faddress(n));
1031 }
1032 
1033 
1034 void TemplateTable::dstore(int n) {
1035   transition(dtos, vtos);
1036   __ fstp_d(daddress(n));
1037 }
1038 
1039 
1040 void TemplateTable::astore(int n) {
1041   transition(vtos, vtos);
1042   __ pop_ptr(rax);
1043   __ movptr(aaddress(n), rax);
1044 }
1045 
1046 
1047 void TemplateTable::pop() {
1048   transition(vtos, vtos);
1049   __ addptr(rsp, Interpreter::stackElementSize);
1050 }
1051 
1052 
1053 void TemplateTable::pop2() {
1054   transition(vtos, vtos);
1055   __ addptr(rsp, 2*Interpreter::stackElementSize);
1056 }
1057 
1058 
1059 void TemplateTable::dup() {
1060   transition(vtos, vtos);
1061   // stack: ..., a
1062   __ load_ptr(0, rax);
1063   __ push_ptr(rax);
1064   // stack: ..., a, a
1065 }
1066 
1067 
1068 void TemplateTable::dup_x1() {
1069   transition(vtos, vtos);
1070   // stack: ..., a, b
1071   __ load_ptr( 0, rax);  // load b
1072   __ load_ptr( 1, rcx);  // load a
1073   __ store_ptr(1, rax);  // store b
1074   __ store_ptr(0, rcx);  // store a
1075   __ push_ptr(rax);      // push b
1076   // stack: ..., b, a, b
1077 }
1078 
1079 
1080 void TemplateTable::dup_x2() {
1081   transition(vtos, vtos);
1082   // stack: ..., a, b, c
1083   __ load_ptr( 0, rax);  // load c
1084   __ load_ptr( 2, rcx);  // load a
1085   __ store_ptr(2, rax);  // store c in a
1086   __ push_ptr(rax);      // push c
1087   // stack: ..., c, b, c, c
1088   __ load_ptr( 2, rax);  // load b
1089   __ store_ptr(2, rcx);  // store a in b
1090   // stack: ..., c, a, c, c
1091   __ store_ptr(1, rax);  // store b in c
1092   // stack: ..., c, a, b, c
1093 }
1094 
1095 
1096 void TemplateTable::dup2() {
1097   transition(vtos, vtos);
1098   // stack: ..., a, b
1099   __ load_ptr(1, rax);  // load a
1100   __ push_ptr(rax);     // push a
1101   __ load_ptr(1, rax);  // load b
1102   __ push_ptr(rax);     // push b
1103   // stack: ..., a, b, a, b
1104 }
1105 
1106 
1107 void TemplateTable::dup2_x1() {
1108   transition(vtos, vtos);
1109   // stack: ..., a, b, c
1110   __ load_ptr( 0, rcx);  // load c
1111   __ load_ptr( 1, rax);  // load b
1112   __ push_ptr(rax);      // push b
1113   __ push_ptr(rcx);      // push c
1114   // stack: ..., a, b, c, b, c
1115   __ store_ptr(3, rcx);  // store c in b
1116   // stack: ..., a, c, c, b, c
1117   __ load_ptr( 4, rcx);  // load a
1118   __ store_ptr(2, rcx);  // store a in 2nd c
1119   // stack: ..., a, c, a, b, c
1120   __ store_ptr(4, rax);  // store b in a
1121   // stack: ..., b, c, a, b, c
1122   // stack: ..., b, c, a, b, c
1123 }
1124 
1125 
1126 void TemplateTable::dup2_x2() {
1127   transition(vtos, vtos);
1128   // stack: ..., a, b, c, d
1129   __ load_ptr( 0, rcx);  // load d
1130   __ load_ptr( 1, rax);  // load c
1131   __ push_ptr(rax);      // push c
1132   __ push_ptr(rcx);      // push d
1133   // stack: ..., a, b, c, d, c, d
1134   __ load_ptr( 4, rax);  // load b
1135   __ store_ptr(2, rax);  // store b in d
1136   __ store_ptr(4, rcx);  // store d in b
1137   // stack: ..., a, d, c, b, c, d
1138   __ load_ptr( 5, rcx);  // load a
1139   __ load_ptr( 3, rax);  // load c
1140   __ store_ptr(3, rcx);  // store a in c
1141   __ store_ptr(5, rax);  // store c in a
1142   // stack: ..., c, d, a, b, c, d
1143   // stack: ..., c, d, a, b, c, d
1144 }
1145 
1146 
1147 void TemplateTable::swap() {
1148   transition(vtos, vtos);
1149   // stack: ..., a, b
1150   __ load_ptr( 1, rcx);  // load a
1151   __ load_ptr( 0, rax);  // load b
1152   __ store_ptr(0, rcx);  // store a in b
1153   __ store_ptr(1, rax);  // store b in a
1154   // stack: ..., b, a
1155 }
1156 
1157 
1158 void TemplateTable::iop2(Operation op) {
1159   transition(itos, itos);
1160   switch (op) {
1161     case add  :                   __ pop_i(rdx); __ addl (rax, rdx); break;
1162     case sub  : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
1163     case mul  :                   __ pop_i(rdx); __ imull(rax, rdx); break;
1164     case _and :                   __ pop_i(rdx); __ andl (rax, rdx); break;
1165     case _or  :                   __ pop_i(rdx); __ orl  (rax, rdx); break;
1166     case _xor :                   __ pop_i(rdx); __ xorl (rax, rdx); break;
1167     case shl  : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
1168     case shr  : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
1169     case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax);      break; // implicit masking of lower 5 bits by Intel shift instr.
1170     default   : ShouldNotReachHere();
1171   }
1172 }
1173 
1174 
1175 void TemplateTable::lop2(Operation op) {
1176   transition(ltos, ltos);
1177   __ pop_l(rbx, rcx);
1178   switch (op) {
1179     case add  : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
1180     case sub  : __ subl(rbx, rax); __ sbbl(rcx, rdx);
1181                 __ mov (rax, rbx); __ mov (rdx, rcx); break;
1182     case _and : __ andl(rax, rbx); __ andl(rdx, rcx); break;
1183     case _or  : __ orl (rax, rbx); __ orl (rdx, rcx); break;
1184     case _xor : __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
1185     default   : ShouldNotReachHere();
1186   }
1187 }
1188 
1189 
1190 void TemplateTable::idiv() {
1191   transition(itos, itos);
1192   __ mov(rcx, rax);
1193   __ pop_i(rax);
1194   // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If
1195   //       they are not equal, one could do a normal division (no correction
1196   //       needed), which may speed up this implementation for the common case.
1197   //       (see also JVM spec., p.243 & p.271)
1198   __ corrected_idivl(rcx);
1199 }
1200 
1201 
1202 void TemplateTable::irem() {
1203   transition(itos, itos);
1204   __ mov(rcx, rax);
1205   __ pop_i(rax);
1206   // Note: could xor rax, and rcx and compare with (-1 ^ min_int). If
1207   //       they are not equal, one could do a normal division (no correction
1208   //       needed), which may speed up this implementation for the common case.
1209   //       (see also JVM spec., p.243 & p.271)
1210   __ corrected_idivl(rcx);
1211   __ mov(rax, rdx);
1212 }
1213 
1214 
1215 void TemplateTable::lmul() {
1216   transition(ltos, ltos);
1217   __ pop_l(rbx, rcx);
1218   __ push(rcx); __ push(rbx);
1219   __ push(rdx); __ push(rax);
1220   __ lmul(2 * wordSize, 0);
1221   __ addptr(rsp, 4 * wordSize);  // take off temporaries
1222 }
1223 
1224 
1225 void TemplateTable::ldiv() {
1226   transition(ltos, ltos);
1227   __ pop_l(rbx, rcx);
1228   __ push(rcx); __ push(rbx);
1229   __ push(rdx); __ push(rax);
1230   // check if y = 0
1231   __ orl(rax, rdx);
1232   __ jump_cc(Assembler::zero,
1233              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1234   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
1235   __ addptr(rsp, 4 * wordSize);  // take off temporaries
1236 }
1237 
1238 
1239 void TemplateTable::lrem() {
1240   transition(ltos, ltos);
1241   __ pop_l(rbx, rcx);
1242   __ push(rcx); __ push(rbx);
1243   __ push(rdx); __ push(rax);
1244   // check if y = 0
1245   __ orl(rax, rdx);
1246   __ jump_cc(Assembler::zero,
1247              ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
1248   __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
1249   __ addptr(rsp, 4 * wordSize);
1250 }
1251 
1252 
1253 void TemplateTable::lshl() {
1254   transition(itos, ltos);
1255   __ movl(rcx, rax);                             // get shift count
1256   __ pop_l(rax, rdx);                            // get shift value
1257   __ lshl(rdx, rax);
1258 }
1259 
1260 
1261 void TemplateTable::lshr() {
1262   transition(itos, ltos);
1263   __ mov(rcx, rax);                              // get shift count
1264   __ pop_l(rax, rdx);                            // get shift value
1265   __ lshr(rdx, rax, true);
1266 }
1267 
1268 
1269 void TemplateTable::lushr() {
1270   transition(itos, ltos);
1271   __ mov(rcx, rax);                              // get shift count
1272   __ pop_l(rax, rdx);                            // get shift value
1273   __ lshr(rdx, rax);
1274 }
1275 
1276 
1277 void TemplateTable::fop2(Operation op) {
1278   transition(ftos, ftos);
1279   switch (op) {
1280     case add: __ fadd_s (at_rsp());                break;
1281     case sub: __ fsubr_s(at_rsp());                break;
1282     case mul: __ fmul_s (at_rsp());                break;
1283     case div: __ fdivr_s(at_rsp());                break;
1284     case rem: __ fld_s  (at_rsp()); __ fremr(rax); break;
1285     default : ShouldNotReachHere();
1286   }
1287   __ f2ieee();
1288   __ pop(rax);  // pop float thing off
1289 }
1290 
1291 
1292 void TemplateTable::dop2(Operation op) {
1293   transition(dtos, dtos);
1294 
1295   switch (op) {
1296     case add: __ fadd_d (at_rsp());                break;
1297     case sub: __ fsubr_d(at_rsp());                break;
1298     case mul: {
1299       Label L_strict;
1300       Label L_join;
1301       const Address access_flags      (rcx, Method::access_flags_offset());
1302       __ get_method(rcx);
1303       __ movl(rcx, access_flags);
1304       __ testl(rcx, JVM_ACC_STRICT);
1305       __ jccb(Assembler::notZero, L_strict);
1306       __ fmul_d (at_rsp());
1307       __ jmpb(L_join);
1308       __ bind(L_strict);
1309       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1310       __ fmulp();
1311       __ fmul_d (at_rsp());
1312       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1313       __ fmulp();
1314       __ bind(L_join);
1315       break;
1316     }
1317     case div: {
1318       Label L_strict;
1319       Label L_join;
1320       const Address access_flags      (rcx, Method::access_flags_offset());
1321       __ get_method(rcx);
1322       __ movl(rcx, access_flags);
1323       __ testl(rcx, JVM_ACC_STRICT);
1324       __ jccb(Assembler::notZero, L_strict);
1325       __ fdivr_d(at_rsp());
1326       __ jmp(L_join);
1327       __ bind(L_strict);
1328       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias1()));
1329       __ fmul_d (at_rsp());
1330       __ fdivrp();
1331       __ fld_x(ExternalAddress(StubRoutines::addr_fpu_subnormal_bias2()));
1332       __ fmulp();
1333       __ bind(L_join);
1334       break;
1335     }
1336     case rem: __ fld_d  (at_rsp()); __ fremr(rax); break;
1337     default : ShouldNotReachHere();
1338   }
1339   __ d2ieee();
1340   // Pop double precision number from rsp.
1341   __ pop(rax);
1342   __ pop(rdx);
1343 }
1344 
1345 
1346 void TemplateTable::ineg() {
1347   transition(itos, itos);
1348   __ negl(rax);
1349 }
1350 
1351 
1352 void TemplateTable::lneg() {
1353   transition(ltos, ltos);
1354   __ lneg(rdx, rax);
1355 }
1356 
1357 
1358 void TemplateTable::fneg() {
1359   transition(ftos, ftos);
1360   __ fchs();
1361 }
1362 
1363 
1364 void TemplateTable::dneg() {
1365   transition(dtos, dtos);
1366   __ fchs();
1367 }
1368 
1369 
1370 void TemplateTable::iinc() {
1371   transition(vtos, vtos);
1372   __ load_signed_byte(rdx, at_bcp(2));           // get constant
1373   locals_index(rbx);
1374   __ addl(iaddress(rbx), rdx);
1375 }
1376 
1377 
1378 void TemplateTable::wide_iinc() {
1379   transition(vtos, vtos);
1380   __ movl(rdx, at_bcp(4));                       // get constant
1381   locals_index_wide(rbx);
1382   __ bswapl(rdx);                                 // swap bytes & sign-extend constant
1383   __ sarl(rdx, 16);
1384   __ addl(iaddress(rbx), rdx);
1385   // Note: should probably use only one movl to get both
1386   //       the index and the constant -> fix this
1387 }
1388 
1389 
1390 void TemplateTable::convert() {
1391   // Checking
1392 #ifdef ASSERT
1393   { TosState tos_in  = ilgl;
1394     TosState tos_out = ilgl;
1395     switch (bytecode()) {
1396       case Bytecodes::_i2l: // fall through
1397       case Bytecodes::_i2f: // fall through
1398       case Bytecodes::_i2d: // fall through
1399       case Bytecodes::_i2b: // fall through
1400       case Bytecodes::_i2c: // fall through
1401       case Bytecodes::_i2s: tos_in = itos; break;
1402       case Bytecodes::_l2i: // fall through
1403       case Bytecodes::_l2f: // fall through
1404       case Bytecodes::_l2d: tos_in = ltos; break;
1405       case Bytecodes::_f2i: // fall through
1406       case Bytecodes::_f2l: // fall through
1407       case Bytecodes::_f2d: tos_in = ftos; break;
1408       case Bytecodes::_d2i: // fall through
1409       case Bytecodes::_d2l: // fall through
1410       case Bytecodes::_d2f: tos_in = dtos; break;
1411       default             : ShouldNotReachHere();
1412     }
1413     switch (bytecode()) {
1414       case Bytecodes::_l2i: // fall through
1415       case Bytecodes::_f2i: // fall through
1416       case Bytecodes::_d2i: // fall through
1417       case Bytecodes::_i2b: // fall through
1418       case Bytecodes::_i2c: // fall through
1419       case Bytecodes::_i2s: tos_out = itos; break;
1420       case Bytecodes::_i2l: // fall through
1421       case Bytecodes::_f2l: // fall through
1422       case Bytecodes::_d2l: tos_out = ltos; break;
1423       case Bytecodes::_i2f: // fall through
1424       case Bytecodes::_l2f: // fall through
1425       case Bytecodes::_d2f: tos_out = ftos; break;
1426       case Bytecodes::_i2d: // fall through
1427       case Bytecodes::_l2d: // fall through
1428       case Bytecodes::_f2d: tos_out = dtos; break;
1429       default             : ShouldNotReachHere();
1430     }
1431     transition(tos_in, tos_out);
1432   }
1433 #endif // ASSERT
1434 
1435   // Conversion
1436   // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
1437   switch (bytecode()) {
1438     case Bytecodes::_i2l:
1439       __ extend_sign(rdx, rax);
1440       break;
1441     case Bytecodes::_i2f:
1442       __ push(rax);          // store int on tos
1443       __ fild_s(at_rsp());   // load int to ST0
1444       __ f2ieee();           // truncate to float size
1445       __ pop(rcx);           // adjust rsp
1446       break;
1447     case Bytecodes::_i2d:
1448       __ push(rax);          // add one slot for d2ieee()
1449       __ push(rax);          // store int on tos
1450       __ fild_s(at_rsp());   // load int to ST0
1451       __ d2ieee();           // truncate to double size
1452       __ pop(rcx);           // adjust rsp
1453       __ pop(rcx);
1454       break;
1455     case Bytecodes::_i2b:
1456       __ shll(rax, 24);      // truncate upper 24 bits
1457       __ sarl(rax, 24);      // and sign-extend byte
1458       LP64_ONLY(__ movsbl(rax, rax));
1459       break;
1460     case Bytecodes::_i2c:
1461       __ andl(rax, 0xFFFF);  // truncate upper 16 bits
1462       LP64_ONLY(__ movzwl(rax, rax));
1463       break;
1464     case Bytecodes::_i2s:
1465       __ shll(rax, 16);      // truncate upper 16 bits
1466       __ sarl(rax, 16);      // and sign-extend short
1467       LP64_ONLY(__ movswl(rax, rax));
1468       break;
1469     case Bytecodes::_l2i:
1470       /* nothing to do */
1471       break;
1472     case Bytecodes::_l2f:
1473       __ push(rdx);          // store long on tos
1474       __ push(rax);
1475       __ fild_d(at_rsp());   // load long to ST0
1476       __ f2ieee();           // truncate to float size
1477       __ pop(rcx);           // adjust rsp
1478       __ pop(rcx);
1479       break;
1480     case Bytecodes::_l2d:
1481       __ push(rdx);          // store long on tos
1482       __ push(rax);
1483       __ fild_d(at_rsp());   // load long to ST0
1484       __ d2ieee();           // truncate to double size
1485       __ pop(rcx);           // adjust rsp
1486       __ pop(rcx);
1487       break;
1488     case Bytecodes::_f2i:
1489       __ push(rcx);          // reserve space for argument
1490       __ fstp_s(at_rsp());   // pass float argument on stack
1491       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
1492       break;
1493     case Bytecodes::_f2l:
1494       __ push(rcx);          // reserve space for argument
1495       __ fstp_s(at_rsp());   // pass float argument on stack
1496       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
1497       break;
1498     case Bytecodes::_f2d:
1499       /* nothing to do */
1500       break;
1501     case Bytecodes::_d2i:
1502       __ push(rcx);          // reserve space for argument
1503       __ push(rcx);
1504       __ fstp_d(at_rsp());   // pass double argument on stack
1505       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
1506       break;
1507     case Bytecodes::_d2l:
1508       __ push(rcx);          // reserve space for argument
1509       __ push(rcx);
1510       __ fstp_d(at_rsp());   // pass double argument on stack
1511       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
1512       break;
1513     case Bytecodes::_d2f:
1514       __ push(rcx);          // reserve space for f2ieee()
1515       __ f2ieee();           // truncate to float size
1516       __ pop(rcx);           // adjust rsp
1517       break;
1518     default             :
1519       ShouldNotReachHere();
1520   }
1521 }
1522 
1523 
1524 void TemplateTable::lcmp() {
1525   transition(ltos, itos);
1526   // y = rdx:rax
1527   __ pop_l(rbx, rcx);             // get x = rcx:rbx
1528   __ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
1529   __ mov(rax, rcx);
1530 }
1531 
1532 
1533 void TemplateTable::float_cmp(bool is_float, int unordered_result) {
1534   if (is_float) {
1535     __ fld_s(at_rsp());
1536   } else {
1537     __ fld_d(at_rsp());
1538     __ pop(rdx);
1539   }
1540   __ pop(rcx);
1541   __ fcmp2int(rax, unordered_result < 0);
1542 }
1543 
1544 
1545 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1546   __ get_method(rcx);           // ECX holds method
1547   __ profile_taken_branch(rax,rbx); // EAX holds updated MDP, EBX holds bumped taken count
1548 
1549   const ByteSize be_offset = MethodCounters::backedge_counter_offset() +
1550                              InvocationCounter::counter_offset();
1551   const ByteSize inv_offset = MethodCounters::invocation_counter_offset() +
1552                               InvocationCounter::counter_offset();
1553 
1554   // Load up EDX with the branch displacement
1555   if (is_wide) {
1556     __ movl(rdx, at_bcp(1));
1557   } else {
1558     __ load_signed_short(rdx, at_bcp(1));
1559   }
1560   __ bswapl(rdx);
1561   if (!is_wide) __ sarl(rdx, 16);
1562   LP64_ONLY(__ movslq(rdx, rdx));
1563 
1564 
1565   // Handle all the JSR stuff here, then exit.
1566   // It's much shorter and cleaner than intermingling with the
1567   // non-JSR normal-branch stuff occurring below.
1568   if (is_jsr) {
1569     // Pre-load the next target bytecode into EBX
1570     __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
1571 
1572     // compute return address as bci in rax,
1573     __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(ConstMethod::codes_offset())));
1574     __ subptr(rax, Address(rcx, Method::const_offset()));
1575     // Adjust the bcp in RSI by the displacement in EDX
1576     __ addptr(rsi, rdx);
1577     // Push return address
1578     __ push_i(rax);
1579     // jsr returns vtos
1580     __ dispatch_only_noverify(vtos);
1581     return;
1582   }
1583 
1584   // Normal (non-jsr) branch handling
1585 
1586   // Adjust the bcp in RSI by the displacement in EDX
1587   __ addptr(rsi, rdx);
1588 
1589   assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
1590   Label backedge_counter_overflow;
1591   Label profile_method;
1592   Label dispatch;
1593   if (UseLoopCounter) {
1594     // increment backedge counter for backward branches
1595     // rax,: MDO
1596     // rbx,: MDO bumped taken-count
1597     // rcx: method
1598     // rdx: target offset
1599     // rsi: target bcp
1600     // rdi: locals pointer
1601     __ testl(rdx, rdx);             // check if forward or backward branch
1602     __ jcc(Assembler::positive, dispatch); // count only if backward branch
1603 
1604     // check if MethodCounters exists
1605     Label has_counters;
1606     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
1607     __ testptr(rax, rax);
1608     __ jcc(Assembler::notZero, has_counters);
1609     __ push(rdx);
1610     __ push(rcx);
1611     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::build_method_counters),
1612                rcx);
1613     __ pop(rcx);
1614     __ pop(rdx);
1615     __ movptr(rax, Address(rcx, Method::method_counters_offset()));
1616     __ testptr(rax, rax);
1617     __ jcc(Assembler::zero, dispatch);
1618     __ bind(has_counters);
1619 
1620     if (TieredCompilation) {
1621       Label no_mdo;
1622       int increment = InvocationCounter::count_increment;
1623       int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1624       if (ProfileInterpreter) {
1625         // Are we profiling?
1626         __ movptr(rbx, Address(rcx, in_bytes(Method::method_data_offset())));
1627         __ testptr(rbx, rbx);
1628         __ jccb(Assembler::zero, no_mdo);
1629         // Increment the MDO backedge counter
1630         const Address mdo_backedge_counter(rbx, in_bytes(MethodData::backedge_counter_offset()) +
1631                                                 in_bytes(InvocationCounter::counter_offset()));
1632         __ increment_mask_and_jump(mdo_backedge_counter, increment, mask,
1633                                    rax, false, Assembler::zero, &backedge_counter_overflow);
1634         __ jmp(dispatch);
1635       }
1636       __ bind(no_mdo);
1637       // Increment backedge counter in MethodCounters*
1638       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
1639       __ increment_mask_and_jump(Address(rcx, be_offset), increment, mask,
1640                                  rax, false, Assembler::zero, &backedge_counter_overflow);
1641     } else {
1642       // increment counter
1643       __ movptr(rcx, Address(rcx, Method::method_counters_offset()));
1644       __ movl(rax, Address(rcx, be_offset));        // load backedge counter
1645       __ incrementl(rax, InvocationCounter::count_increment); // increment counter
1646       __ movl(Address(rcx, be_offset), rax);        // store counter
1647 
1648       __ movl(rax, Address(rcx, inv_offset));    // load invocation counter
1649 
1650       __ andl(rax, InvocationCounter::count_mask_value);     // and the status bits
1651       __ addl(rax, Address(rcx, be_offset));        // add both counters
1652 
1653       if (ProfileInterpreter) {
1654         // Test to see if we should create a method data oop
1655         __ cmp32(rax,
1656                  ExternalAddress((address) &InvocationCounter::InterpreterProfileLimit));
1657         __ jcc(Assembler::less, dispatch);
1658 
1659         // if no method data exists, go to profile method
1660         __ test_method_data_pointer(rax, profile_method);
1661 
1662         if (UseOnStackReplacement) {
1663           // check for overflow against rbx, which is the MDO taken count
1664           __ cmp32(rbx,
1665                    ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1666           __ jcc(Assembler::below, dispatch);
1667 
1668           // When ProfileInterpreter is on, the backedge_count comes from the
1669           // MethodData*, which value does not get reset on the call to
1670           // frequency_counter_overflow().  To avoid excessive calls to the overflow
1671           // routine while the method is being compiled, add a second test to make
1672           // sure the overflow function is called only once every overflow_frequency.
1673           const int overflow_frequency = 1024;
1674           __ andptr(rbx, overflow_frequency-1);
1675           __ jcc(Assembler::zero, backedge_counter_overflow);
1676         }
1677       } else {
1678         if (UseOnStackReplacement) {
1679           // check for overflow against rax, which is the sum of the counters
1680           __ cmp32(rax,
1681                    ExternalAddress((address) &InvocationCounter::InterpreterBackwardBranchLimit));
1682           __ jcc(Assembler::aboveEqual, backedge_counter_overflow);
1683 
1684         }
1685       }
1686     }
1687     __ bind(dispatch);
1688   }
1689 
1690   // Pre-load the next target bytecode into EBX
1691   __ load_unsigned_byte(rbx, Address(rsi, 0));
1692 
1693   // continue with the bytecode @ target
1694   // rax,: return bci for jsr's, unused otherwise
1695   // rbx,: target bytecode
1696   // rsi: target bcp
1697   __ dispatch_only(vtos);
1698 
1699   if (UseLoopCounter) {
1700     if (ProfileInterpreter) {
1701       // Out-of-line code to allocate method data oop.
1702       __ bind(profile_method);
1703       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1704       __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
1705       __ set_method_data_pointer_for_bcp();
1706       __ jmp(dispatch);
1707     }
1708 
1709     if (UseOnStackReplacement) {
1710 
1711       // invocation counter overflow
1712       __ bind(backedge_counter_overflow);
1713       __ negptr(rdx);
1714       __ addptr(rdx, rsi);        // branch bcp
1715       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
1716       __ load_unsigned_byte(rbx, Address(rsi, 0));  // restore target bytecode
1717 
1718       // rax,: osr nmethod (osr ok) or NULL (osr not possible)
1719       // rbx,: target bytecode
1720       // rdx: scratch
1721       // rdi: locals pointer
1722       // rsi: bcp
1723       __ testptr(rax, rax);                      // test result
1724       __ jcc(Assembler::zero, dispatch);         // no osr if null
1725       // nmethod may have been invalidated (VM may block upon call_VM return)
1726       __ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
1727       __ cmpl(rcx, InvalidOSREntryBci);
1728       __ jcc(Assembler::equal, dispatch);
1729 
1730       // We have the address of an on stack replacement routine in rax,
1731       // We need to prepare to execute the OSR method. First we must
1732       // migrate the locals and monitors off of the stack.
1733 
1734       __ mov(rbx, rax);                             // save the nmethod
1735 
1736       const Register thread = rcx;
1737       __ get_thread(thread);
1738       call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
1739       // rax, is OSR buffer, move it to expected parameter location
1740       __ mov(rcx, rax);
1741 
1742       // pop the interpreter frame
1743       __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
1744       __ leave();                                // remove frame anchor
1745       __ pop(rdi);                               // get return address
1746       __ mov(rsp, rdx);                          // set sp to sender sp
1747 
1748       // Align stack pointer for compiled code (note that caller is
1749       // responsible for undoing this fixup by remembering the old SP
1750       // in an rbp,-relative location)
1751       __ andptr(rsp, -(StackAlignmentInBytes));
1752 
1753       // push the (possibly adjusted) return address
1754       __ push(rdi);
1755 
1756       // and begin the OSR nmethod
1757       __ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
1758     }
1759   }
1760 }
1761 
1762 
1763 void TemplateTable::if_0cmp(Condition cc) {
1764   transition(itos, vtos);
1765   // assume branch is more often taken than not (loops use backward branches)
1766   Label not_taken;
1767   __ testl(rax, rax);
1768   __ jcc(j_not(cc), not_taken);
1769   branch(false, false);
1770   __ bind(not_taken);
1771   __ profile_not_taken_branch(rax);
1772 }
1773 
1774 
1775 void TemplateTable::if_icmp(Condition cc) {
1776   transition(itos, vtos);
1777   // assume branch is more often taken than not (loops use backward branches)
1778   Label not_taken;
1779   __ pop_i(rdx);
1780   __ cmpl(rdx, rax);
1781   __ jcc(j_not(cc), not_taken);
1782   branch(false, false);
1783   __ bind(not_taken);
1784   __ profile_not_taken_branch(rax);
1785 }
1786 
1787 
1788 void TemplateTable::if_nullcmp(Condition cc) {
1789   transition(atos, vtos);
1790   // assume branch is more often taken than not (loops use backward branches)
1791   Label not_taken;
1792   __ testptr(rax, rax);
1793   __ jcc(j_not(cc), not_taken);
1794   branch(false, false);
1795   __ bind(not_taken);
1796   __ profile_not_taken_branch(rax);
1797 }
1798 
1799 
1800 void TemplateTable::if_acmp(Condition cc) {
1801   transition(atos, vtos);
1802   // assume branch is more often taken than not (loops use backward branches)
1803   Label not_taken;
1804   __ pop_ptr(rdx);
1805   __ cmpptr(rdx, rax);
1806   __ jcc(j_not(cc), not_taken);
1807   branch(false, false);
1808   __ bind(not_taken);
1809   __ profile_not_taken_branch(rax);
1810 }
1811 
1812 
1813 void TemplateTable::ret() {
1814   transition(vtos, vtos);
1815   locals_index(rbx);
1816   __ movptr(rbx, iaddress(rbx));                   // get return bci, compute return bcp
1817   __ profile_ret(rbx, rcx);
1818   __ get_method(rax);
1819   __ movptr(rsi, Address(rax, Method::const_offset()));
1820   __ lea(rsi, Address(rsi, rbx, Address::times_1,
1821                       ConstMethod::codes_offset()));
1822   __ dispatch_next(vtos);
1823 }
1824 
1825 
1826 void TemplateTable::wide_ret() {
1827   transition(vtos, vtos);
1828   locals_index_wide(rbx);
1829   __ movptr(rbx, iaddress(rbx));                   // get return bci, compute return bcp
1830   __ profile_ret(rbx, rcx);
1831   __ get_method(rax);
1832   __ movptr(rsi, Address(rax, Method::const_offset()));
1833   __ lea(rsi, Address(rsi, rbx, Address::times_1, ConstMethod::codes_offset()));
1834   __ dispatch_next(vtos);
1835 }
1836 
1837 
1838 void TemplateTable::tableswitch() {
1839   Label default_case, continue_execution;
1840   transition(itos, vtos);
1841   // align rsi
1842   __ lea(rbx, at_bcp(wordSize));
1843   __ andptr(rbx, -wordSize);
1844   // load lo & hi
1845   __ movl(rcx, Address(rbx, 1 * wordSize));
1846   __ movl(rdx, Address(rbx, 2 * wordSize));
1847   __ bswapl(rcx);
1848   __ bswapl(rdx);
1849   // check against lo & hi
1850   __ cmpl(rax, rcx);
1851   __ jccb(Assembler::less, default_case);
1852   __ cmpl(rax, rdx);
1853   __ jccb(Assembler::greater, default_case);
1854   // lookup dispatch offset
1855   __ subl(rax, rcx);
1856   __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
1857   __ profile_switch_case(rax, rbx, rcx);
1858   // continue execution
1859   __ bind(continue_execution);
1860   __ bswapl(rdx);
1861   __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1862   __ addptr(rsi, rdx);
1863   __ dispatch_only(vtos);
1864   // handle default
1865   __ bind(default_case);
1866   __ profile_switch_default(rax);
1867   __ movl(rdx, Address(rbx, 0));
1868   __ jmp(continue_execution);
1869 }
1870 
1871 
1872 void TemplateTable::lookupswitch() {
1873   transition(itos, itos);
1874   __ stop("lookupswitch bytecode should have been rewritten");
1875 }
1876 
1877 
1878 void TemplateTable::fast_linearswitch() {
1879   transition(itos, vtos);
1880   Label loop_entry, loop, found, continue_execution;
1881   // bswapl rax, so we can avoid bswapping the table entries
1882   __ bswapl(rax);
1883   // align rsi
1884   __ lea(rbx, at_bcp(wordSize));                // btw: should be able to get rid of this instruction (change offsets below)
1885   __ andptr(rbx, -wordSize);
1886   // set counter
1887   __ movl(rcx, Address(rbx, wordSize));
1888   __ bswapl(rcx);
1889   __ jmpb(loop_entry);
1890   // table search
1891   __ bind(loop);
1892   __ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
1893   __ jccb(Assembler::equal, found);
1894   __ bind(loop_entry);
1895   __ decrementl(rcx);
1896   __ jcc(Assembler::greaterEqual, loop);
1897   // default case
1898   __ profile_switch_default(rax);
1899   __ movl(rdx, Address(rbx, 0));
1900   __ jmpb(continue_execution);
1901   // entry found -> get offset
1902   __ bind(found);
1903   __ movl(rdx, Address(rbx, rcx, Address::times_8, 3 * wordSize));
1904   __ profile_switch_case(rcx, rax, rbx);
1905   // continue execution
1906   __ bind(continue_execution);
1907   __ bswapl(rdx);
1908   __ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
1909   __ addptr(rsi, rdx);
1910   __ dispatch_only(vtos);
1911 }
1912 
1913 
1914 void TemplateTable::fast_binaryswitch() {
1915   transition(itos, vtos);
1916   // Implementation using the following core algorithm:
1917   //
1918   // int binary_search(int key, LookupswitchPair* array, int n) {
1919   //   // Binary search according to "Methodik des Programmierens" by
1920   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1921   //   int i = 0;
1922   //   int j = n;
1923   //   while (i+1 < j) {
1924   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1925   //     // with      Q: for all i: 0 <= i < n: key < a[i]
1926   //     // where a stands for the array and assuming that the (inexisting)
1927   //     // element a[n] is infinitely big.
1928   //     int h = (i + j) >> 1;
1929   //     // i < h < j
1930   //     if (key < array[h].fast_match()) {
1931   //       j = h;
1932   //     } else {
1933   //       i = h;
1934   //     }
1935   //   }
1936   //   // R: a[i] <= key < a[i+1] or Q
1937   //   // (i.e., if key is within array, i is the correct index)
1938   //   return i;
1939   // }
1940 
1941   // register allocation
1942   const Register key   = rax;                    // already set (tosca)
1943   const Register array = rbx;
1944   const Register i     = rcx;
1945   const Register j     = rdx;
1946   const Register h     = rdi;                    // needs to be restored
1947   const Register temp  = rsi;
1948   // setup array
1949   __ save_bcp();
1950 
1951   __ lea(array, at_bcp(3*wordSize));             // btw: should be able to get rid of this instruction (change offsets below)
1952   __ andptr(array, -wordSize);
1953   // initialize i & j
1954   __ xorl(i, i);                                 // i = 0;
1955   __ movl(j, Address(array, -wordSize));         // j = length(array);
1956   // Convert j into native byteordering
1957   __ bswapl(j);
1958   // and start
1959   Label entry;
1960   __ jmp(entry);
1961 
1962   // binary search loop
1963   { Label loop;
1964     __ bind(loop);
1965     // int h = (i + j) >> 1;
1966     __ leal(h, Address(i, j, Address::times_1)); // h = i + j;
1967     __ sarl(h, 1);                               // h = (i + j) >> 1;
1968     // if (key < array[h].fast_match()) {
1969     //   j = h;
1970     // } else {
1971     //   i = h;
1972     // }
1973     // Convert array[h].match to native byte-ordering before compare
1974     __ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
1975     __ bswapl(temp);
1976     __ cmpl(key, temp);
1977     // j = h if (key <  array[h].fast_match())
1978     __ cmov32(Assembler::less        , j, h);
1979     // i = h if (key >= array[h].fast_match())
1980     __ cmov32(Assembler::greaterEqual, i, h);
1981     // while (i+1 < j)
1982     __ bind(entry);
1983     __ leal(h, Address(i, 1));                   // i+1
1984     __ cmpl(h, j);                               // i+1 < j
1985     __ jcc(Assembler::less, loop);
1986   }
1987 
1988   // end of binary search, result index is i (must check again!)
1989   Label default_case;
1990   // Convert array[i].match to native byte-ordering before compare
1991   __ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
1992   __ bswapl(temp);
1993   __ cmpl(key, temp);
1994   __ jcc(Assembler::notEqual, default_case);
1995 
1996   // entry found -> j = offset
1997   __ movl(j , Address(array, i, Address::times_8, 1*wordSize));
1998   __ profile_switch_case(i, key, array);
1999   __ bswapl(j);
2000   LP64_ONLY(__ movslq(j, j));
2001   __ restore_bcp();
2002   __ restore_locals();                           // restore rdi
2003   __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2004 
2005   __ addptr(rsi, j);
2006   __ dispatch_only(vtos);
2007 
2008   // default case -> j = default offset
2009   __ bind(default_case);
2010   __ profile_switch_default(i);
2011   __ movl(j, Address(array, -2*wordSize));
2012   __ bswapl(j);
2013   LP64_ONLY(__ movslq(j, j));
2014   __ restore_bcp();
2015   __ restore_locals();                           // restore rdi
2016   __ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
2017   __ addptr(rsi, j);
2018   __ dispatch_only(vtos);
2019 }
2020 
2021 
2022 void TemplateTable::_return(TosState state) {
2023   transition(state, state);
2024   assert(_desc->calls_vm(), "inconsistent calls_vm information"); // call in remove_activation
2025 
2026   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2027     assert(state == vtos, "only valid state");
2028     __ movptr(rax, aaddress(0));
2029     __ load_klass(rdi, rax);
2030     __ movl(rdi, Address(rdi, Klass::access_flags_offset()));
2031     __ testl(rdi, JVM_ACC_HAS_FINALIZER);
2032     Label skip_register_finalizer;
2033     __ jcc(Assembler::zero, skip_register_finalizer);
2034 
2035     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), rax);
2036 
2037     __ bind(skip_register_finalizer);
2038   }
2039 
2040   __ remove_activation(state, rsi);
2041   __ jmp(rsi);
2042 }
2043 
2044 
2045 // ----------------------------------------------------------------------------
2046 // Volatile variables demand their effects be made known to all CPU's in
2047 // order.  Store buffers on most chips allow reads & writes to reorder; the
2048 // JMM's ReadAfterWrite.java test fails in -Xint mode without some kind of
2049 // memory barrier (i.e., it's not sufficient that the interpreter does not
2050 // reorder volatile references, the hardware also must not reorder them).
2051 //
2052 // According to the new Java Memory Model (JMM):
2053 // (1) All volatiles are serialized wrt to each other.
2054 // ALSO reads & writes act as aquire & release, so:
2055 // (2) A read cannot let unrelated NON-volatile memory refs that happen after
2056 // the read float up to before the read.  It's OK for non-volatile memory refs
2057 // that happen before the volatile read to float down below it.
2058 // (3) Similar a volatile write cannot let unrelated NON-volatile memory refs
2059 // that happen BEFORE the write float down to after the write.  It's OK for
2060 // non-volatile memory refs that happen after the volatile write to float up
2061 // before it.
2062 //
2063 // We only put in barriers around volatile refs (they are expensive), not
2064 // _between_ memory refs (that would require us to track the flavor of the
2065 // previous memory refs).  Requirements (2) and (3) require some barriers
2066 // before volatile stores and after volatile loads.  These nearly cover
2067 // requirement (1) but miss the volatile-store-volatile-load case.  This final
2068 // case is placed after volatile-stores although it could just as well go
2069 // before volatile-loads.
2070 void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
2071   // Helper function to insert a is-volatile test and memory barrier
2072   if( !os::is_MP() ) return;    // Not needed on single CPU
2073   __ membar(order_constraint);
2074 }
2075 
2076 void TemplateTable::resolve_cache_and_index(int byte_no,
2077                                             Register Rcache,
2078                                             Register index,
2079                                             size_t index_size) {
2080   const Register temp = rbx;
2081   assert_different_registers(Rcache, index, temp);
2082 
2083   Label resolved;
2084     assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2085     __ get_cache_and_index_and_bytecode_at_bcp(Rcache, index, temp, byte_no, 1, index_size);
2086     __ cmpl(temp, (int) bytecode());  // have we resolved this bytecode?
2087     __ jcc(Assembler::equal, resolved);
2088 
2089   // resolve first time through
2090   address entry;
2091   switch (bytecode()) {
2092     case Bytecodes::_getstatic      : // fall through
2093     case Bytecodes::_putstatic      : // fall through
2094     case Bytecodes::_getfield       : // fall through
2095     case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put);        break;
2096     case Bytecodes::_invokevirtual  : // fall through
2097     case Bytecodes::_invokespecial  : // fall through
2098     case Bytecodes::_invokestatic   : // fall through
2099     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke);         break;
2100     case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle);   break;
2101     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic);  break;
2102     default:
2103       fatal(err_msg("unexpected bytecode: %s", Bytecodes::name(bytecode())));
2104       break;
2105   }
2106   __ movl(temp, (int)bytecode());
2107   __ call_VM(noreg, entry, temp);
2108   // Update registers with resolved info
2109   __ get_cache_and_index_at_bcp(Rcache, index, 1, index_size);
2110   __ bind(resolved);
2111 }
2112 
2113 
2114 // The cache and index registers must be set before call
2115 void TemplateTable::load_field_cp_cache_entry(Register obj,
2116                                               Register cache,
2117                                               Register index,
2118                                               Register off,
2119                                               Register flags,
2120                                               bool is_static = false) {
2121   assert_different_registers(cache, index, flags, off);
2122 
2123   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2124   // Field offset
2125   __ movptr(off, Address(cache, index, Address::times_ptr,
2126                          in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
2127   // Flags
2128   __ movl(flags, Address(cache, index, Address::times_ptr,
2129            in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
2130 
2131   // klass overwrite register
2132   if (is_static) {
2133     __ movptr(obj, Address(cache, index, Address::times_ptr,
2134                            in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
2135     const int mirror_offset = in_bytes(Klass::java_mirror_offset());
2136     __ movptr(obj, Address(obj, mirror_offset));
2137   }
2138 }
2139 
2140 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2141                                                Register method,
2142                                                Register itable_index,
2143                                                Register flags,
2144                                                bool is_invokevirtual,
2145                                                bool is_invokevfinal, /*unused*/
2146                                                bool is_invokedynamic) {
2147   // setup registers
2148   const Register cache = rcx;
2149   const Register index = rdx;
2150   assert_different_registers(method, flags);
2151   assert_different_registers(method, cache, index);
2152   assert_different_registers(itable_index, flags);
2153   assert_different_registers(itable_index, cache, index);
2154   // determine constant pool cache field offsets
2155   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2156   const int method_offset = in_bytes(
2157     ConstantPoolCache::base_offset() +
2158       ((byte_no == f2_byte)
2159        ? ConstantPoolCacheEntry::f2_offset()
2160        : ConstantPoolCacheEntry::f1_offset()));
2161   const int flags_offset = in_bytes(ConstantPoolCache::base_offset() +
2162                                     ConstantPoolCacheEntry::flags_offset());
2163   // access constant pool cache fields
2164   const int index_offset = in_bytes(ConstantPoolCache::base_offset() +
2165                                     ConstantPoolCacheEntry::f2_offset());
2166 
2167   size_t index_size = (is_invokedynamic ? sizeof(u4) : sizeof(u2));
2168   resolve_cache_and_index(byte_no, cache, index, index_size);
2169     __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
2170 
2171   if (itable_index != noreg) {
2172     __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
2173   }
2174   __ movl(flags, Address(cache, index, Address::times_ptr, flags_offset));
2175 }
2176 
2177 
2178 // The registers cache and index expected to be set before call.
2179 // Correct values of the cache and index registers are preserved.
2180 void TemplateTable::jvmti_post_field_access(Register cache,
2181                                             Register index,
2182                                             bool is_static,
2183                                             bool has_tos) {
2184   if (JvmtiExport::can_post_field_access()) {
2185     // Check to see if a field access watch has been set before we take
2186     // the time to call into the VM.
2187     Label L1;
2188     assert_different_registers(cache, index, rax);
2189     __ mov32(rax, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2190     __ testl(rax,rax);
2191     __ jcc(Assembler::zero, L1);
2192 
2193     // cache entry pointer
2194     __ addptr(cache, in_bytes(ConstantPoolCache::base_offset()));
2195     __ shll(index, LogBytesPerWord);
2196     __ addptr(cache, index);
2197     if (is_static) {
2198       __ xorptr(rax, rax);      // NULL object reference
2199     } else {
2200       __ pop(atos);         // Get the object
2201       __ verify_oop(rax);
2202       __ push(atos);        // Restore stack state
2203     }
2204     // rax,:   object pointer or NULL
2205     // cache: cache entry pointer
2206     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access),
2207                rax, cache);
2208     __ get_cache_and_index_at_bcp(cache, index, 1);
2209     __ bind(L1);
2210   }
2211 }
2212 
2213 void TemplateTable::pop_and_check_object(Register r) {
2214   __ pop_ptr(r);
2215   __ null_check(r);  // for field access must check obj.
2216   __ verify_oop(r);
2217 }
2218 
2219 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2220   transition(vtos, vtos);
2221 
2222   const Register cache = rcx;
2223   const Register index = rdx;
2224   const Register obj   = rcx;
2225   const Register off   = rbx;
2226   const Register flags = rax;
2227 
2228   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2229   jvmti_post_field_access(cache, index, is_static, false);
2230   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2231 
2232   if (!is_static) pop_and_check_object(obj);
2233 
2234   const Address lo(obj, off, Address::times_1, 0*wordSize);
2235   const Address hi(obj, off, Address::times_1, 1*wordSize);
2236 
2237   Label Done, notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2238 
2239   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2240   assert(btos == 0, "change code, btos != 0");
2241   // btos
2242   __ andptr(flags, ConstantPoolCacheEntry::tos_state_mask);
2243   __ jcc(Assembler::notZero, notByte);
2244 
2245   __ load_signed_byte(rax, lo );
2246   __ push(btos);
2247   // Rewrite bytecode to be faster
2248   if (!is_static) {
2249     patch_bytecode(Bytecodes::_fast_bgetfield, rcx, rbx);
2250   }
2251   __ jmp(Done);
2252 
2253   __ bind(notByte);
2254   // itos
2255   __ cmpl(flags, itos );
2256   __ jcc(Assembler::notEqual, notInt);
2257 
2258   __ movl(rax, lo );
2259   __ push(itos);
2260   // Rewrite bytecode to be faster
2261   if (!is_static) {
2262     patch_bytecode(Bytecodes::_fast_igetfield, rcx, rbx);
2263   }
2264   __ jmp(Done);
2265 
2266   __ bind(notInt);
2267   // atos
2268   __ cmpl(flags, atos );
2269   __ jcc(Assembler::notEqual, notObj);
2270 
2271   __ movl(rax, lo );
2272   __ push(atos);
2273   if (!is_static) {
2274     patch_bytecode(Bytecodes::_fast_agetfield, rcx, rbx);
2275   }
2276   __ jmp(Done);
2277 
2278   __ bind(notObj);
2279   // ctos
2280   __ cmpl(flags, ctos );
2281   __ jcc(Assembler::notEqual, notChar);
2282 
2283   __ load_unsigned_short(rax, lo );
2284   __ push(ctos);
2285   if (!is_static) {
2286     patch_bytecode(Bytecodes::_fast_cgetfield, rcx, rbx);
2287   }
2288   __ jmp(Done);
2289 
2290   __ bind(notChar);
2291   // stos
2292   __ cmpl(flags, stos );
2293   __ jcc(Assembler::notEqual, notShort);
2294 
2295   __ load_signed_short(rax, lo );
2296   __ push(stos);
2297   if (!is_static) {
2298     patch_bytecode(Bytecodes::_fast_sgetfield, rcx, rbx);
2299   }
2300   __ jmp(Done);
2301 
2302   __ bind(notShort);
2303   // ltos
2304   __ cmpl(flags, ltos );
2305   __ jcc(Assembler::notEqual, notLong);
2306 
2307   // Generate code as if volatile.  There just aren't enough registers to
2308   // save that information and this code is faster than the test.
2309   __ fild_d(lo);                // Must load atomically
2310   __ subptr(rsp,2*wordSize);    // Make space for store
2311   __ fistp_d(Address(rsp,0));
2312   __ pop(rax);
2313   __ pop(rdx);
2314 
2315   __ push(ltos);
2316   // Don't rewrite to _fast_lgetfield for potential volatile case.
2317   __ jmp(Done);
2318 
2319   __ bind(notLong);
2320   // ftos
2321   __ cmpl(flags, ftos );
2322   __ jcc(Assembler::notEqual, notFloat);
2323 
2324   __ fld_s(lo);
2325   __ push(ftos);
2326   if (!is_static) {
2327     patch_bytecode(Bytecodes::_fast_fgetfield, rcx, rbx);
2328   }
2329   __ jmp(Done);
2330 
2331   __ bind(notFloat);
2332   // dtos
2333   __ cmpl(flags, dtos );
2334   __ jcc(Assembler::notEqual, notDouble);
2335 
2336   __ fld_d(lo);
2337   __ push(dtos);
2338   if (!is_static) {
2339     patch_bytecode(Bytecodes::_fast_dgetfield, rcx, rbx);
2340   }
2341   __ jmpb(Done);
2342 
2343   __ bind(notDouble);
2344 
2345   __ stop("Bad state");
2346 
2347   __ bind(Done);
2348   // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2349   // volatile_barrier( );
2350 }
2351 
2352 
2353 void TemplateTable::getfield(int byte_no) {
2354   getfield_or_static(byte_no, false);
2355 }
2356 
2357 
2358 void TemplateTable::getstatic(int byte_no) {
2359   getfield_or_static(byte_no, true);
2360 }
2361 
2362 // The registers cache and index expected to be set before call.
2363 // The function may destroy various registers, just not the cache and index registers.
2364 void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is_static) {
2365 
2366   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2367 
2368   if (JvmtiExport::can_post_field_modification()) {
2369     // Check to see if a field modification watch has been set before we take
2370     // the time to call into the VM.
2371     Label L1;
2372     assert_different_registers(cache, index, rax);
2373     __ mov32(rax, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2374     __ testl(rax, rax);
2375     __ jcc(Assembler::zero, L1);
2376 
2377     // The cache and index registers have been already set.
2378     // This allows to eliminate this call but the cache and index
2379     // registers have to be correspondingly used after this line.
2380     __ get_cache_and_index_at_bcp(rax, rdx, 1);
2381 
2382     if (is_static) {
2383       // Life is simple.  Null out the object pointer.
2384       __ xorptr(rbx, rbx);
2385     } else {
2386       // Life is harder. The stack holds the value on top, followed by the object.
2387       // We don't know the size of the value, though; it could be one or two words
2388       // depending on its type. As a result, we must find the type to determine where
2389       // the object is.
2390       Label two_word, valsize_known;
2391       __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
2392                                    ConstantPoolCacheEntry::flags_offset())));
2393       __ mov(rbx, rsp);
2394       __ shrl(rcx, ConstantPoolCacheEntry::tos_state_shift);
2395       // Make sure we don't need to mask rcx after the above shift
2396       ConstantPoolCacheEntry::verify_tos_state_shift();
2397       __ cmpl(rcx, ltos);
2398       __ jccb(Assembler::equal, two_word);
2399       __ cmpl(rcx, dtos);
2400       __ jccb(Assembler::equal, two_word);
2401       __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
2402       __ jmpb(valsize_known);
2403 
2404       __ bind(two_word);
2405       __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
2406 
2407       __ bind(valsize_known);
2408       // setup object pointer
2409       __ movptr(rbx, Address(rbx, 0));
2410     }
2411     // cache entry pointer
2412     __ addptr(rax, in_bytes(cp_base_offset));
2413     __ shll(rdx, LogBytesPerWord);
2414     __ addptr(rax, rdx);
2415     // object (tos)
2416     __ mov(rcx, rsp);
2417     // rbx,: object pointer set up above (NULL if static)
2418     // rax,: cache entry pointer
2419     // rcx: jvalue object on the stack
2420     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification),
2421                rbx, rax, rcx);
2422     __ get_cache_and_index_at_bcp(cache, index, 1);
2423     __ bind(L1);
2424   }
2425 }
2426 
2427 
2428 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2429   transition(vtos, vtos);
2430 
2431   const Register cache = rcx;
2432   const Register index = rdx;
2433   const Register obj   = rcx;
2434   const Register off   = rbx;
2435   const Register flags = rax;
2436 
2437   resolve_cache_and_index(byte_no, cache, index, sizeof(u2));
2438   jvmti_post_field_mod(cache, index, is_static);
2439   load_field_cp_cache_entry(obj, cache, index, off, flags, is_static);
2440 
2441   // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2442   // volatile_barrier( );
2443 
2444   Label notVolatile, Done;
2445   __ movl(rdx, flags);
2446   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2447   __ andl(rdx, 0x1);
2448 
2449   // field addresses
2450   const Address lo(obj, off, Address::times_1, 0*wordSize);
2451   const Address hi(obj, off, Address::times_1, 1*wordSize);
2452 
2453   Label notByte, notInt, notShort, notChar, notLong, notFloat, notObj, notDouble;
2454 
2455   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2456   assert(btos == 0, "change code, btos != 0");
2457   __ andl(flags, ConstantPoolCacheEntry::tos_state_mask);
2458   __ jcc(Assembler::notZero, notByte);
2459 
2460   // btos
2461   {
2462     __ pop(btos);
2463     if (!is_static) pop_and_check_object(obj);
2464     __ movb(lo, rax);
2465     if (!is_static) {
2466       patch_bytecode(Bytecodes::_fast_bputfield, rcx, rbx, true, byte_no);
2467     }
2468     __ jmp(Done);
2469   }
2470 
2471   __ bind(notByte);
2472   __ cmpl(flags, itos);
2473   __ jcc(Assembler::notEqual, notInt);
2474 
2475   // itos
2476   {
2477     __ pop(itos);
2478     if (!is_static) pop_and_check_object(obj);
2479     __ movl(lo, rax);
2480     if (!is_static) {
2481       patch_bytecode(Bytecodes::_fast_iputfield, rcx, rbx, true, byte_no);
2482     }
2483     __ jmp(Done);
2484   }
2485 
2486   __ bind(notInt);
2487   __ cmpl(flags, atos);
2488   __ jcc(Assembler::notEqual, notObj);
2489 
2490   // atos
2491   {
2492     __ pop(atos);
2493     if (!is_static) pop_and_check_object(obj);
2494     do_oop_store(_masm, lo, rax, _bs->kind(), false);
2495     if (!is_static) {
2496       patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx, true, byte_no);
2497     }
2498     __ jmp(Done);
2499   }
2500 
2501   __ bind(notObj);
2502   __ cmpl(flags, ctos);
2503   __ jcc(Assembler::notEqual, notChar);
2504 
2505   // ctos
2506   {
2507     __ pop(ctos);
2508     if (!is_static) pop_and_check_object(obj);
2509     __ movw(lo, rax);
2510     if (!is_static) {
2511       patch_bytecode(Bytecodes::_fast_cputfield, rcx, rbx, true, byte_no);
2512     }
2513     __ jmp(Done);
2514   }
2515 
2516   __ bind(notChar);
2517   __ cmpl(flags, stos);
2518   __ jcc(Assembler::notEqual, notShort);
2519 
2520   // stos
2521   {
2522     __ pop(stos);
2523     if (!is_static) pop_and_check_object(obj);
2524     __ movw(lo, rax);
2525     if (!is_static) {
2526       patch_bytecode(Bytecodes::_fast_sputfield, rcx, rbx, true, byte_no);
2527     }
2528     __ jmp(Done);
2529   }
2530 
2531   __ bind(notShort);
2532   __ cmpl(flags, ltos);
2533   __ jcc(Assembler::notEqual, notLong);
2534 
2535   // ltos
2536   {
2537     Label notVolatileLong;
2538     __ testl(rdx, rdx);
2539     __ jcc(Assembler::zero, notVolatileLong);
2540 
2541     __ pop(ltos);  // overwrites rdx, do this after testing volatile.
2542     if (!is_static) pop_and_check_object(obj);
2543 
2544     // Replace with real volatile test
2545     __ push(rdx);
2546     __ push(rax);                 // Must update atomically with FIST
2547     __ fild_d(Address(rsp,0));    // So load into FPU register
2548     __ fistp_d(lo);               // and put into memory atomically
2549     __ addptr(rsp, 2*wordSize);
2550     // volatile_barrier();
2551     volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2552                                                  Assembler::StoreStore));
2553     // Don't rewrite volatile version
2554     __ jmp(notVolatile);
2555 
2556     __ bind(notVolatileLong);
2557 
2558     __ pop(ltos);  // overwrites rdx
2559     if (!is_static) pop_and_check_object(obj);
2560     NOT_LP64(__ movptr(hi, rdx));
2561     __ movptr(lo, rax);
2562     if (!is_static) {
2563       patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx, true, byte_no);
2564     }
2565     __ jmp(notVolatile);
2566   }
2567 
2568   __ bind(notLong);
2569   __ cmpl(flags, ftos);
2570   __ jcc(Assembler::notEqual, notFloat);
2571 
2572   // ftos
2573   {
2574     __ pop(ftos);
2575     if (!is_static) pop_and_check_object(obj);
2576     __ fstp_s(lo);
2577     if (!is_static) {
2578       patch_bytecode(Bytecodes::_fast_fputfield, rcx, rbx, true, byte_no);
2579     }
2580     __ jmp(Done);
2581   }
2582 
2583   __ bind(notFloat);
2584 #ifdef ASSERT
2585   __ cmpl(flags, dtos);
2586   __ jcc(Assembler::notEqual, notDouble);
2587 #endif
2588 
2589   // dtos
2590   {
2591     __ pop(dtos);
2592     if (!is_static) pop_and_check_object(obj);
2593     __ fstp_d(lo);
2594     if (!is_static) {
2595       patch_bytecode(Bytecodes::_fast_dputfield, rcx, rbx, true, byte_no);
2596     }
2597     __ jmp(Done);
2598   }
2599 
2600 #ifdef ASSERT
2601   __ bind(notDouble);
2602   __ stop("Bad state");
2603 #endif
2604 
2605   __ bind(Done);
2606 
2607   // Check for volatile store
2608   __ testl(rdx, rdx);
2609   __ jcc(Assembler::zero, notVolatile);
2610   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2611                                                Assembler::StoreStore));
2612   __ bind(notVolatile);
2613 }
2614 
2615 
2616 void TemplateTable::putfield(int byte_no) {
2617   putfield_or_static(byte_no, false);
2618 }
2619 
2620 
2621 void TemplateTable::putstatic(int byte_no) {
2622   putfield_or_static(byte_no, true);
2623 }
2624 
2625 void TemplateTable::jvmti_post_fast_field_mod() {
2626   if (JvmtiExport::can_post_field_modification()) {
2627     // Check to see if a field modification watch has been set before we take
2628     // the time to call into the VM.
2629     Label L2;
2630      __ mov32(rcx, ExternalAddress((address)JvmtiExport::get_field_modification_count_addr()));
2631      __ testl(rcx,rcx);
2632      __ jcc(Assembler::zero, L2);
2633      __ pop_ptr(rbx);               // copy the object pointer from tos
2634      __ verify_oop(rbx);
2635      __ push_ptr(rbx);              // put the object pointer back on tos
2636 
2637      // Save tos values before call_VM() clobbers them. Since we have
2638      // to do it for every data type, we use the saved values as the
2639      // jvalue object.
2640      switch (bytecode()) {          // load values into the jvalue object
2641      case Bytecodes::_fast_aputfield: __ push_ptr(rax); break;
2642      case Bytecodes::_fast_bputfield: // fall through
2643      case Bytecodes::_fast_sputfield: // fall through
2644      case Bytecodes::_fast_cputfield: // fall through
2645      case Bytecodes::_fast_iputfield: __ push_i(rax); break;
2646      case Bytecodes::_fast_dputfield: __ push_d(); break;
2647      case Bytecodes::_fast_fputfield: __ push_f(); break;
2648      case Bytecodes::_fast_lputfield: __ push_l(rax); break;
2649 
2650      default:
2651        ShouldNotReachHere();
2652      }
2653      __ mov(rcx, rsp);              // points to jvalue on the stack
2654      // access constant pool cache entry
2655      __ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
2656      __ verify_oop(rbx);
2657      // rbx,: object pointer copied above
2658      // rax,: cache entry pointer
2659      // rcx: jvalue object on the stack
2660      __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
2661 
2662      switch (bytecode()) {             // restore tos values
2663      case Bytecodes::_fast_aputfield: __ pop_ptr(rax); break;
2664      case Bytecodes::_fast_bputfield: // fall through
2665      case Bytecodes::_fast_sputfield: // fall through
2666      case Bytecodes::_fast_cputfield: // fall through
2667      case Bytecodes::_fast_iputfield: __ pop_i(rax); break;
2668      case Bytecodes::_fast_dputfield: __ pop_d(); break;
2669      case Bytecodes::_fast_fputfield: __ pop_f(); break;
2670      case Bytecodes::_fast_lputfield: __ pop_l(rax); break;
2671      }
2672      __ bind(L2);
2673   }
2674 }
2675 
2676 void TemplateTable::fast_storefield(TosState state) {
2677   transition(state, vtos);
2678 
2679   ByteSize base = ConstantPoolCache::base_offset();
2680 
2681   jvmti_post_fast_field_mod();
2682 
2683   // access constant pool cache
2684   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2685 
2686   // test for volatile with rdx but rdx is tos register for lputfield.
2687   if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
2688   __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
2689                        ConstantPoolCacheEntry::flags_offset())));
2690 
2691   // replace index with field offset from cache entry
2692   __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
2693 
2694   // Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
2695   // volatile_barrier( );
2696 
2697   Label notVolatile, Done;
2698   __ shrl(rdx, ConstantPoolCacheEntry::is_volatile_shift);
2699   __ andl(rdx, 0x1);
2700   // Check for volatile store
2701   __ testl(rdx, rdx);
2702   __ jcc(Assembler::zero, notVolatile);
2703 
2704   if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2705 
2706   // Get object from stack
2707   pop_and_check_object(rcx);
2708 
2709   // field addresses
2710   const Address lo(rcx, rbx, Address::times_1, 0*wordSize);
2711   const Address hi(rcx, rbx, Address::times_1, 1*wordSize);
2712 
2713   // access field
2714   switch (bytecode()) {
2715     case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2716     case Bytecodes::_fast_sputfield: // fall through
2717     case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2718     case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2719     case Bytecodes::_fast_lputfield:
2720       NOT_LP64(__ movptr(hi, rdx));
2721       __ movptr(lo, rax);
2722       break;
2723     case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2724     case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2725     case Bytecodes::_fast_aputfield: {
2726       do_oop_store(_masm, lo, rax, _bs->kind(), false);
2727       break;
2728     }
2729     default:
2730       ShouldNotReachHere();
2731   }
2732 
2733   Label done;
2734   volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
2735                                                Assembler::StoreStore));
2736   // Barriers are so large that short branch doesn't reach!
2737   __ jmp(done);
2738 
2739   // Same code as above, but don't need rdx to test for volatile.
2740   __ bind(notVolatile);
2741 
2742   if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
2743 
2744   // Get object from stack
2745   pop_and_check_object(rcx);
2746 
2747   // access field
2748   switch (bytecode()) {
2749     case Bytecodes::_fast_bputfield: __ movb(lo, rax); break;
2750     case Bytecodes::_fast_sputfield: // fall through
2751     case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
2752     case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
2753     case Bytecodes::_fast_lputfield:
2754       NOT_LP64(__ movptr(hi, rdx));
2755       __ movptr(lo, rax);
2756       break;
2757     case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
2758     case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
2759     case Bytecodes::_fast_aputfield: {
2760       do_oop_store(_masm, lo, rax, _bs->kind(), false);
2761       break;
2762     }
2763     default:
2764       ShouldNotReachHere();
2765   }
2766   __ bind(done);
2767 }
2768 
2769 
2770 void TemplateTable::fast_accessfield(TosState state) {
2771   transition(atos, state);
2772 
2773   // do the JVMTI work here to avoid disturbing the register state below
2774   if (JvmtiExport::can_post_field_access()) {
2775     // Check to see if a field access watch has been set before we take
2776     // the time to call into the VM.
2777     Label L1;
2778     __ mov32(rcx, ExternalAddress((address) JvmtiExport::get_field_access_count_addr()));
2779     __ testl(rcx,rcx);
2780     __ jcc(Assembler::zero, L1);
2781     // access constant pool cache entry
2782     __ get_cache_entry_pointer_at_bcp(rcx, rdx, 1);
2783     __ push_ptr(rax);  // save object pointer before call_VM() clobbers it
2784     __ verify_oop(rax);
2785     // rax,: object pointer copied above
2786     // rcx: cache entry pointer
2787     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), rax, rcx);
2788     __ pop_ptr(rax);   // restore object pointer
2789     __ bind(L1);
2790   }
2791 
2792   // access constant pool cache
2793   __ get_cache_and_index_at_bcp(rcx, rbx, 1);
2794   // replace index with field offset from cache entry
2795   __ movptr(rbx, Address(rcx,
2796                          rbx,
2797                          Address::times_ptr,
2798                          in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2799 
2800 
2801   // rax,: object
2802   __ verify_oop(rax);
2803   __ null_check(rax);
2804   // field addresses
2805   const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2806   const Address hi = Address(rax, rbx, Address::times_1, 1*wordSize);
2807 
2808   // access field
2809   switch (bytecode()) {
2810     case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo );                 break;
2811     case Bytecodes::_fast_sgetfield: __ load_signed_short(rax, lo );      break;
2812     case Bytecodes::_fast_cgetfield: __ load_unsigned_short(rax, lo );    break;
2813     case Bytecodes::_fast_igetfield: __ movl(rax, lo);                    break;
2814     case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten");  break;
2815     case Bytecodes::_fast_fgetfield: __ fld_s(lo);                        break;
2816     case Bytecodes::_fast_dgetfield: __ fld_d(lo);                        break;
2817     case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
2818     default:
2819       ShouldNotReachHere();
2820   }
2821 
2822   // Doug Lea believes this is not needed with current Sparcs(TSO) and Intel(PSO)
2823   // volatile_barrier( );
2824 }
2825 
2826 void TemplateTable::fast_xaccess(TosState state) {
2827   transition(vtos, state);
2828   // get receiver
2829   __ movptr(rax, aaddress(0));
2830   // access constant pool cache
2831   __ get_cache_and_index_at_bcp(rcx, rdx, 2);
2832   __ movptr(rbx, Address(rcx,
2833                          rdx,
2834                          Address::times_ptr,
2835                          in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::f2_offset())));
2836   // make sure exception is reported in correct bcp range (getfield is next instruction)
2837   __ increment(rsi);
2838   __ null_check(rax);
2839   const Address lo = Address(rax, rbx, Address::times_1, 0*wordSize);
2840   if (state == itos) {
2841     __ movl(rax, lo);
2842   } else if (state == atos) {
2843     __ movptr(rax, lo);
2844     __ verify_oop(rax);
2845   } else if (state == ftos) {
2846     __ fld_s(lo);
2847   } else {
2848     ShouldNotReachHere();
2849   }
2850   __ decrement(rsi);
2851 }
2852 
2853 
2854 
2855 //----------------------------------------------------------------------------------------------------
2856 // Calls
2857 
2858 void TemplateTable::count_calls(Register method, Register temp) {
2859   // implemented elsewhere
2860   ShouldNotReachHere();
2861 }
2862 
2863 
2864 void TemplateTable::prepare_invoke(int byte_no,
2865                                    Register method,  // linked method (or i-klass)
2866                                    Register index,   // itable index, MethodType, etc.
2867                                    Register recv,    // if caller wants to see it
2868                                    Register flags    // if caller wants to test it
2869                                    ) {
2870   // determine flags
2871   const Bytecodes::Code code = bytecode();
2872   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
2873   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
2874   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
2875   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
2876   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
2877   const bool load_receiver       = (recv  != noreg);
2878   const bool save_flags          = (flags != noreg);
2879   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
2880   assert(save_flags    == (is_invokeinterface || is_invokevirtual), "need flags for vfinal");
2881   assert(flags == noreg || flags == rdx, "");
2882   assert(recv  == noreg || recv  == rcx, "");
2883 
2884   // setup registers & access constant pool cache
2885   if (recv  == noreg)  recv  = rcx;
2886   if (flags == noreg)  flags = rdx;
2887   assert_different_registers(method, index, recv, flags);
2888 
2889   // save 'interpreter return address'
2890   __ save_bcp();
2891 
2892   load_invoke_cp_cache_entry(byte_no, method, index, flags, is_invokevirtual, false, is_invokedynamic);
2893 
2894   // maybe push appendix to arguments (just before return address)
2895   if (is_invokedynamic || is_invokehandle) {
2896     Label L_no_push;
2897     __ testl(flags, (1 << ConstantPoolCacheEntry::has_appendix_shift));
2898     __ jccb(Assembler::zero, L_no_push);
2899     // Push the appendix as a trailing parameter.
2900     // This must be done before we get the receiver,
2901     // since the parameter_size includes it.
2902     __ push(rbx);
2903     __ mov(rbx, index);
2904     assert(ConstantPoolCacheEntry::_indy_resolved_references_appendix_offset == 0, "appendix expected at index+0");
2905     __ load_resolved_reference_at_index(index, rbx);
2906     __ pop(rbx);
2907     __ push(index);  // push appendix (MethodType, CallSite, etc.)
2908     __ bind(L_no_push);
2909   }
2910 
2911   // load receiver if needed (note: no return address pushed yet)
2912   if (load_receiver) {
2913     __ movl(recv, flags);
2914     __ andl(recv, ConstantPoolCacheEntry::parameter_size_mask);
2915     const int no_return_pc_pushed_yet = -1;  // argument slot correction before we push return address
2916     const int receiver_is_at_end      = -1;  // back off one slot to get receiver
2917     Address recv_addr = __ argument_address(recv, no_return_pc_pushed_yet + receiver_is_at_end);
2918     __ movptr(recv, recv_addr);
2919     __ verify_oop(recv);
2920   }
2921 
2922   if (save_flags) {
2923     __ mov(rsi, flags);
2924   }
2925 
2926   // compute return type
2927   __ shrl(flags, ConstantPoolCacheEntry::tos_state_shift);
2928   // Make sure we don't need to mask flags after the above shift
2929   ConstantPoolCacheEntry::verify_tos_state_shift();
2930   // load return address
2931   {
2932     const address table_addr = (is_invokeinterface || is_invokedynamic) ?
2933         (address)Interpreter::return_5_addrs_by_index_table() :
2934         (address)Interpreter::return_3_addrs_by_index_table();
2935     ExternalAddress table(table_addr);
2936     __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
2937   }
2938 
2939   // push return address
2940   __ push(flags);
2941 
2942   // Restore flags value from the constant pool cache, and restore rsi
2943   // for later null checks.  rsi is the bytecode pointer
2944   if (save_flags) {
2945     __ mov(flags, rsi);
2946     __ restore_bcp();
2947   }
2948 }
2949 
2950 
2951 void TemplateTable::invokevirtual_helper(Register index,
2952                                          Register recv,
2953                                          Register flags) {
2954   // Uses temporary registers rax, rdx
2955   assert_different_registers(index, recv, rax, rdx);
2956   assert(index == rbx, "");
2957   assert(recv  == rcx, "");
2958 
2959   // Test for an invoke of a final method
2960   Label notFinal;
2961   __ movl(rax, flags);
2962   __ andl(rax, (1 << ConstantPoolCacheEntry::is_vfinal_shift));
2963   __ jcc(Assembler::zero, notFinal);
2964 
2965   const Register method = index;  // method must be rbx
2966   assert(method == rbx,
2967          "Method* must be rbx for interpreter calling convention");
2968 
2969   // do the call - the index is actually the method to call
2970   // that is, f2 is a vtable index if !is_vfinal, else f2 is a Method*
2971 
2972   // It's final, need a null check here!
2973   __ null_check(recv);
2974 
2975   // profile this call
2976   __ profile_final_call(rax);
2977   __ profile_arguments_type(rax, method, rsi, true);
2978 
2979   __ jump_from_interpreted(method, rax);
2980 
2981   __ bind(notFinal);
2982 
2983   // get receiver klass
2984   __ null_check(recv, oopDesc::klass_offset_in_bytes());
2985   __ load_klass(rax, recv);
2986 
2987   // profile this call
2988   __ profile_virtual_call(rax, rdi, rdx);
2989 
2990   // get target Method* & entry point
2991   __ lookup_virtual_method(rax, index, method);
2992   __ profile_arguments_type(rdx, method, rsi, true);
2993   __ jump_from_interpreted(method, rdx);
2994 }
2995 
2996 
2997 void TemplateTable::invokevirtual(int byte_no) {
2998   transition(vtos, vtos);
2999   assert(byte_no == f2_byte, "use this argument");
3000   prepare_invoke(byte_no,
3001                  rbx,    // method or vtable index
3002                  noreg,  // unused itable index
3003                  rcx, rdx); // recv, flags
3004 
3005   // rbx: index
3006   // rcx: receiver
3007   // rdx: flags
3008 
3009   invokevirtual_helper(rbx, rcx, rdx);
3010 }
3011 
3012 
3013 void TemplateTable::invokespecial(int byte_no) {
3014   transition(vtos, vtos);
3015   assert(byte_no == f1_byte, "use this argument");
3016   prepare_invoke(byte_no, rbx, noreg,  // get f1 Method*
3017                  rcx);  // get receiver also for null check
3018   __ verify_oop(rcx);
3019   __ null_check(rcx);
3020   // do the call
3021   __ profile_call(rax);
3022   __ profile_arguments_type(rax, rbx, rsi, false);
3023   __ jump_from_interpreted(rbx, rax);
3024 }
3025 
3026 
3027 void TemplateTable::invokestatic(int byte_no) {
3028   transition(vtos, vtos);
3029   assert(byte_no == f1_byte, "use this argument");
3030   prepare_invoke(byte_no, rbx);  // get f1 Method*
3031   // do the call
3032   __ profile_call(rax);
3033   __ profile_arguments_type(rax, rbx, rsi, false);
3034   __ jump_from_interpreted(rbx, rax);
3035 }
3036 
3037 
3038 void TemplateTable::fast_invokevfinal(int byte_no) {
3039   transition(vtos, vtos);
3040   assert(byte_no == f2_byte, "use this argument");
3041   __ stop("fast_invokevfinal not used on x86");
3042 }
3043 
3044 
3045 void TemplateTable::invokeinterface(int byte_no) {
3046   transition(vtos, vtos);
3047   assert(byte_no == f1_byte, "use this argument");
3048   prepare_invoke(byte_no, rax, rbx,  // get f1 Klass*, f2 itable index
3049                  rcx, rdx); // recv, flags
3050 
3051   // rax: interface klass (from f1)
3052   // rbx: itable index (from f2)
3053   // rcx: receiver
3054   // rdx: flags
3055 
3056   // Special case of invokeinterface called for virtual method of
3057   // java.lang.Object.  See cpCacheOop.cpp for details.
3058   // This code isn't produced by javac, but could be produced by
3059   // another compliant java compiler.
3060   Label notMethod;
3061   __ movl(rdi, rdx);
3062   __ andl(rdi, (1 << ConstantPoolCacheEntry::is_forced_virtual_shift));
3063   __ jcc(Assembler::zero, notMethod);
3064 
3065   invokevirtual_helper(rbx, rcx, rdx);
3066   __ bind(notMethod);
3067 
3068   // Get receiver klass into rdx - also a null check
3069   __ restore_locals();  // restore rdi
3070   __ null_check(rcx, oopDesc::klass_offset_in_bytes());
3071   __ load_klass(rdx, rcx);
3072 
3073   // profile this call
3074   __ profile_virtual_call(rdx, rsi, rdi);
3075 
3076   Label no_such_interface, no_such_method;
3077 
3078   __ lookup_interface_method(// inputs: rec. class, interface, itable index
3079                              rdx, rax, rbx,
3080                              // outputs: method, scan temp. reg
3081                              rbx, rsi,
3082                              no_such_interface);
3083 
3084   // rbx: Method* to call
3085   // rcx: receiver
3086   // Check for abstract method error
3087   // Note: This should be done more efficiently via a throw_abstract_method_error
3088   //       interpreter entry point and a conditional jump to it in case of a null
3089   //       method.
3090   __ testptr(rbx, rbx);
3091   __ jcc(Assembler::zero, no_such_method);
3092 
3093   __ profile_arguments_type(rdx, rbx, rsi, true);
3094 
3095   // do the call
3096   // rcx: receiver
3097   // rbx,: Method*
3098   __ jump_from_interpreted(rbx, rdx);
3099   __ should_not_reach_here();
3100 
3101   // exception handling code follows...
3102   // note: must restore interpreter registers to canonical
3103   //       state for exception handling to work correctly!
3104 
3105   __ bind(no_such_method);
3106   // throw exception
3107   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
3108   __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
3109   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3110   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3111   // the call_VM checks for exception, so we should never return here.
3112   __ should_not_reach_here();
3113 
3114   __ bind(no_such_interface);
3115   // throw exception
3116   __ pop(rbx);           // pop return address (pushed by prepare_invoke)
3117   __ restore_bcp();      // rsi must be correct for exception handler   (was destroyed)
3118   __ restore_locals();   // make sure locals pointer is correct as well (was destroyed)
3119   __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3120                    InterpreterRuntime::throw_IncompatibleClassChangeError));
3121   // the call_VM checks for exception, so we should never return here.
3122   __ should_not_reach_here();
3123 }
3124 
3125 void TemplateTable::invokehandle(int byte_no) {
3126   transition(vtos, vtos);
3127   assert(byte_no == f1_byte, "use this argument");
3128   const Register rbx_method = rbx;
3129   const Register rax_mtype  = rax;
3130   const Register rcx_recv   = rcx;
3131   const Register rdx_flags  = rdx;
3132 
3133   if (!EnableInvokeDynamic) {
3134     // rewriter does not generate this bytecode
3135     __ should_not_reach_here();
3136     return;
3137   }
3138 
3139   prepare_invoke(byte_no, rbx_method, rax_mtype, rcx_recv);
3140   __ verify_method_ptr(rbx_method);
3141   __ verify_oop(rcx_recv);
3142   __ null_check(rcx_recv);
3143 
3144   // rax: MethodType object (from cpool->resolved_references[f1], if necessary)
3145   // rbx: MH.invokeExact_MT method (from f2)
3146 
3147   // Note:  rax_mtype is already pushed (if necessary) by prepare_invoke
3148 
3149   // FIXME: profile the LambdaForm also
3150   __ profile_final_call(rax);
3151   __ profile_arguments_type(rdx, rbx_method, rsi, true);
3152 
3153   __ jump_from_interpreted(rbx_method, rdx);
3154 }
3155 
3156 
3157 void TemplateTable::invokedynamic(int byte_no) {
3158   transition(vtos, vtos);
3159   assert(byte_no == f1_byte, "use this argument");
3160 
3161   if (!EnableInvokeDynamic) {
3162     // We should not encounter this bytecode if !EnableInvokeDynamic.
3163     // The verifier will stop it.  However, if we get past the verifier,
3164     // this will stop the thread in a reasonable way, without crashing the JVM.
3165     __ call_VM(noreg, CAST_FROM_FN_PTR(address,
3166                      InterpreterRuntime::throw_IncompatibleClassChangeError));
3167     // the call_VM checks for exception, so we should never return here.
3168     __ should_not_reach_here();
3169     return;
3170   }
3171 
3172   const Register rbx_method   = rbx;
3173   const Register rax_callsite = rax;
3174 
3175   prepare_invoke(byte_no, rbx_method, rax_callsite);
3176 
3177   // rax: CallSite object (from cpool->resolved_references[f1])
3178   // rbx: MH.linkToCallSite method (from f2)
3179 
3180   // Note:  rax_callsite is already pushed by prepare_invoke
3181 
3182   // %%% should make a type profile for any invokedynamic that takes a ref argument
3183   // profile this call
3184   __ profile_call(rsi);
3185   __ profile_arguments_type(rdx, rbx, rsi, false);
3186 
3187   __ verify_oop(rax_callsite);
3188 
3189   __ jump_from_interpreted(rbx_method, rdx);
3190 }
3191 
3192 //----------------------------------------------------------------------------------------------------
3193 // Allocation
3194 
3195 void TemplateTable::_new() {
3196   transition(vtos, atos);
3197   __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3198   Label slow_case;
3199   Label slow_case_no_pop;
3200   Label done;
3201   Label initialize_header;
3202   Label initialize_object;  // including clearing the fields
3203   Label allocate_shared;
3204 
3205   __ get_cpool_and_tags(rcx, rax);
3206 
3207   // Make sure the class we're about to instantiate has been resolved.
3208   // This is done before loading InstanceKlass to be consistent with the order
3209   // how Constant Pool is updated (see ConstantPool::klass_at_put)
3210   const int tags_offset = Array<u1>::base_offset_in_bytes();
3211   __ cmpb(Address(rax, rdx, Address::times_1, tags_offset), JVM_CONSTANT_Class);
3212   __ jcc(Assembler::notEqual, slow_case_no_pop);
3213 
3214   // get InstanceKlass
3215   __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(ConstantPool)));
3216   __ push(rcx);  // save the contexts of klass for initializing the header
3217 
3218   // make sure klass is initialized & doesn't have finalizer
3219   // make sure klass is fully initialized
3220   __ cmpb(Address(rcx, InstanceKlass::init_state_offset()), InstanceKlass::fully_initialized);
3221   __ jcc(Assembler::notEqual, slow_case);
3222 
3223   // get instance_size in InstanceKlass (scaled to a count of bytes)
3224   __ movl(rdx, Address(rcx, Klass::layout_helper_offset()));
3225   // test to see if it has a finalizer or is malformed in some way
3226   __ testl(rdx, Klass::_lh_instance_slow_path_bit);
3227   __ jcc(Assembler::notZero, slow_case);
3228 
3229   //
3230   // Allocate the instance
3231   // 1) Try to allocate in the TLAB
3232   // 2) if fail and the object is large allocate in the shared Eden
3233   // 3) if the above fails (or is not applicable), go to a slow case
3234   // (creates a new TLAB, etc.)
3235 
3236   const bool allow_shared_alloc =
3237     Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3238 
3239   const Register thread = rcx;
3240   if (UseTLAB || allow_shared_alloc) {
3241     __ get_thread(thread);
3242   }
3243 
3244   if (UseTLAB) {
3245     __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
3246     __ lea(rbx, Address(rax, rdx, Address::times_1));
3247     __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
3248     __ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
3249     __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
3250     if (ZeroTLAB) {
3251       // the fields have been already cleared
3252       __ jmp(initialize_header);
3253     } else {
3254       // initialize both the header and fields
3255       __ jmp(initialize_object);
3256     }
3257   }
3258 
3259   // Allocation in the shared Eden, if allowed.
3260   //
3261   // rdx: instance size in bytes
3262   if (allow_shared_alloc) {
3263     __ bind(allocate_shared);
3264 
3265     ExternalAddress heap_top((address)Universe::heap()->top_addr());
3266 
3267     Label retry;
3268     __ bind(retry);
3269     __ movptr(rax, heap_top);
3270     __ lea(rbx, Address(rax, rdx, Address::times_1));
3271     __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
3272     __ jcc(Assembler::above, slow_case);
3273 
3274     // Compare rax, with the top addr, and if still equal, store the new
3275     // top addr in rbx, at the address of the top addr pointer. Sets ZF if was
3276     // equal, and clears it otherwise. Use lock prefix for atomicity on MPs.
3277     //
3278     // rax,: object begin
3279     // rbx,: object end
3280     // rdx: instance size in bytes
3281     __ locked_cmpxchgptr(rbx, heap_top);
3282 
3283     // if someone beat us on the allocation, try again, otherwise continue
3284     __ jcc(Assembler::notEqual, retry);
3285 
3286     __ incr_allocated_bytes(thread, rdx, 0);
3287   }
3288 
3289   if (UseTLAB || Universe::heap()->supports_inline_contig_alloc()) {
3290     // The object is initialized before the header.  If the object size is
3291     // zero, go directly to the header initialization.
3292     __ bind(initialize_object);
3293     __ decrement(rdx, sizeof(oopDesc));
3294     __ jcc(Assembler::zero, initialize_header);
3295 
3296     // Initialize topmost object field, divide rdx by 8, check if odd and
3297     // test if zero.
3298     __ xorl(rcx, rcx);    // use zero reg to clear memory (shorter code)
3299     __ shrl(rdx, LogBytesPerLong); // divide by 2*oopSize and set carry flag if odd
3300 
3301     // rdx must have been multiple of 8
3302 #ifdef ASSERT
3303     // make sure rdx was multiple of 8
3304     Label L;
3305     // Ignore partial flag stall after shrl() since it is debug VM
3306     __ jccb(Assembler::carryClear, L);
3307     __ stop("object size is not multiple of 2 - adjust this code");
3308     __ bind(L);
3309     // rdx must be > 0, no extra check needed here
3310 #endif
3311 
3312     // initialize remaining object fields: rdx was a multiple of 8
3313     { Label loop;
3314     __ bind(loop);
3315     __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
3316     NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
3317     __ decrement(rdx);
3318     __ jcc(Assembler::notZero, loop);
3319     }
3320 
3321     // initialize object header only.
3322     __ bind(initialize_header);
3323     if (UseBiasedLocking) {
3324       __ pop(rcx);   // get saved klass back in the register.
3325       __ movptr(rbx, Address(rcx, Klass::prototype_header_offset()));
3326       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
3327     } else {
3328       __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
3329                 (int32_t)markOopDesc::prototype()); // header
3330       __ pop(rcx);   // get saved klass back in the register.
3331     }
3332     __ store_klass(rax, rcx);  // klass
3333 
3334     {
3335       SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
3336       // Trigger dtrace event for fastpath
3337       __ push(atos);
3338       __ call_VM_leaf(
3339            CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc), rax);
3340       __ pop(atos);
3341     }
3342 
3343     __ jmp(done);
3344   }
3345 
3346   // slow case
3347   __ bind(slow_case);
3348   __ pop(rcx);   // restore stack pointer to what it was when we came in.
3349   __ bind(slow_case_no_pop);
3350   __ get_constant_pool(rax);
3351   __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3352   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
3353 
3354   // continue
3355   __ bind(done);
3356 }
3357 
3358 
3359 void TemplateTable::newarray() {
3360   transition(itos, atos);
3361   __ push_i(rax);                                 // make sure everything is on the stack
3362   __ load_unsigned_byte(rdx, at_bcp(1));
3363   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), rdx, rax);
3364   __ pop_i(rdx);                                  // discard size
3365 }
3366 
3367 
3368 void TemplateTable::anewarray() {
3369   transition(itos, atos);
3370   __ get_unsigned_2_byte_index_at_bcp(rdx, 1);
3371   __ get_constant_pool(rcx);
3372   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), rcx, rdx, rax);
3373 }
3374 
3375 
3376 void TemplateTable::arraylength() {
3377   transition(atos, itos);
3378   __ null_check(rax, arrayOopDesc::length_offset_in_bytes());
3379   __ movl(rax, Address(rax, arrayOopDesc::length_offset_in_bytes()));
3380 }
3381 
3382 
3383 void TemplateTable::checkcast() {
3384   transition(atos, atos);
3385   Label done, is_null, ok_is_subtype, quicked, resolved;
3386   __ testptr(rax, rax);   // Object is in EAX
3387   __ jcc(Assembler::zero, is_null);
3388 
3389   // Get cpool & tags index
3390   __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3391   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3392   // See if bytecode has already been quicked
3393   __ cmpb(Address(rdx, rbx, Address::times_1, Array<u1>::base_offset_in_bytes()), JVM_CONSTANT_Class);
3394   __ jcc(Assembler::equal, quicked);
3395 
3396   __ push(atos);
3397   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3398   // vm_result_2 has metadata result
3399   // borrow rdi from locals
3400   __ get_thread(rdi);
3401   __ get_vm_result_2(rax, rdi);
3402   __ restore_locals();
3403   __ pop_ptr(rdx);
3404   __ jmpb(resolved);
3405 
3406   // Get superklass in EAX and subklass in EBX
3407   __ bind(quicked);
3408   __ mov(rdx, rax);          // Save object in EDX; EAX needed for subtype check
3409   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool)));
3410 
3411   __ bind(resolved);
3412   __ load_klass(rbx, rdx);
3413 
3414   // Generate subtype check.  Blows ECX.  Resets EDI.  Object in EDX.
3415   // Superklass in EAX.  Subklass in EBX.
3416   __ gen_subtype_check( rbx, ok_is_subtype );
3417 
3418   // Come here on failure
3419   __ push(rdx);
3420   // object is at TOS
3421   __ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
3422 
3423   // Come here on success
3424   __ bind(ok_is_subtype);
3425   __ mov(rax,rdx);           // Restore object in EDX
3426 
3427   // Collect counts on whether this check-cast sees NULLs a lot or not.
3428   if (ProfileInterpreter) {
3429     __ jmp(done);
3430     __ bind(is_null);
3431     __ profile_null_seen(rcx);
3432   } else {
3433     __ bind(is_null);   // same as 'done'
3434   }
3435   __ bind(done);
3436 }
3437 
3438 
3439 void TemplateTable::instanceof() {
3440   transition(atos, itos);
3441   Label done, is_null, ok_is_subtype, quicked, resolved;
3442   __ testptr(rax, rax);
3443   __ jcc(Assembler::zero, is_null);
3444 
3445   // Get cpool & tags index
3446   __ get_cpool_and_tags(rcx, rdx); // ECX=cpool, EDX=tags array
3447   __ get_unsigned_2_byte_index_at_bcp(rbx, 1); // EBX=index
3448   // See if bytecode has already been quicked
3449   __ cmpb(Address(rdx, rbx, Address::times_1, Array<u1>::base_offset_in_bytes()), JVM_CONSTANT_Class);
3450   __ jcc(Assembler::equal, quicked);
3451 
3452   __ push(atos);
3453   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
3454   // vm_result_2 has metadata result
3455   // borrow rdi from locals
3456   __ get_thread(rdi);
3457   __ get_vm_result_2(rax, rdi);
3458   __ restore_locals();
3459   __ pop_ptr(rdx);
3460   __ load_klass(rdx, rdx);
3461   __ jmp(resolved);
3462 
3463   // Get superklass in EAX and subklass in EDX
3464   __ bind(quicked);
3465   __ load_klass(rdx, rax);
3466   __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(ConstantPool)));
3467 
3468   __ bind(resolved);
3469 
3470   // Generate subtype check.  Blows ECX.  Resets EDI.
3471   // Superklass in EAX.  Subklass in EDX.
3472   __ gen_subtype_check( rdx, ok_is_subtype );
3473 
3474   // Come here on failure
3475   __ xorl(rax,rax);
3476   __ jmpb(done);
3477   // Come here on success
3478   __ bind(ok_is_subtype);
3479   __ movl(rax, 1);
3480 
3481   // Collect counts on whether this test sees NULLs a lot or not.
3482   if (ProfileInterpreter) {
3483     __ jmp(done);
3484     __ bind(is_null);
3485     __ profile_null_seen(rcx);
3486   } else {
3487     __ bind(is_null);   // same as 'done'
3488   }
3489   __ bind(done);
3490   // rax, = 0: obj == NULL or  obj is not an instanceof the specified klass
3491   // rax, = 1: obj != NULL and obj is     an instanceof the specified klass
3492 }
3493 
3494 
3495 //----------------------------------------------------------------------------------------------------
3496 // Breakpoints
3497 void TemplateTable::_breakpoint() {
3498 
3499   // Note: We get here even if we are single stepping..
3500   // jbug inists on setting breakpoints at every bytecode
3501   // even if we are in single step mode.
3502 
3503   transition(vtos, vtos);
3504 
3505   // get the unpatched byte code
3506   __ get_method(rcx);
3507   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
3508   __ mov(rbx, rax);
3509 
3510   // post the breakpoint event
3511   __ get_method(rcx);
3512   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), rcx, rsi);
3513 
3514   // complete the execution of original bytecode
3515   __ dispatch_only_normal(vtos);
3516 }
3517 
3518 
3519 //----------------------------------------------------------------------------------------------------
3520 // Exceptions
3521 
3522 void TemplateTable::athrow() {
3523   transition(atos, vtos);
3524   __ null_check(rax);
3525   __ jump(ExternalAddress(Interpreter::throw_exception_entry()));
3526 }
3527 
3528 
3529 //----------------------------------------------------------------------------------------------------
3530 // Synchronization
3531 //
3532 // Note: monitorenter & exit are symmetric routines; which is reflected
3533 //       in the assembly code structure as well
3534 //
3535 // Stack layout:
3536 //
3537 // [expressions  ] <--- rsp               = expression stack top
3538 // ..
3539 // [expressions  ]
3540 // [monitor entry] <--- monitor block top = expression stack bot
3541 // ..
3542 // [monitor entry]
3543 // [frame data   ] <--- monitor block bot
3544 // ...
3545 // [saved rbp,    ] <--- rbp,
3546 
3547 
3548 void TemplateTable::monitorenter() {
3549   transition(atos, vtos);
3550 
3551   // check for NULL object
3552   __ null_check(rax);
3553 
3554   const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3555   const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset        * wordSize);
3556   const int entry_size =         (     frame::interpreter_frame_monitor_size()           * wordSize);
3557   Label allocated;
3558 
3559   // initialize entry pointer
3560   __ xorl(rdx, rdx);                             // points to free slot or NULL
3561 
3562   // find a free slot in the monitor block (result in rdx)
3563   { Label entry, loop, exit;
3564     __ movptr(rcx, monitor_block_top);           // points to current entry, starting with top-most entry
3565 
3566     __ lea(rbx, monitor_block_bot);              // points to word before bottom of monitor block
3567     __ jmpb(entry);
3568 
3569     __ bind(loop);
3570     __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);  // check if current entry is used
3571     __ cmovptr(Assembler::equal, rdx, rcx);      // if not used then remember entry in rdx
3572     __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes()));   // check if current entry is for same object
3573     __ jccb(Assembler::equal, exit);             // if same object then stop searching
3574     __ addptr(rcx, entry_size);                  // otherwise advance to next entry
3575     __ bind(entry);
3576     __ cmpptr(rcx, rbx);                         // check if bottom reached
3577     __ jcc(Assembler::notEqual, loop);           // if not at bottom then check this entry
3578     __ bind(exit);
3579   }
3580 
3581   __ testptr(rdx, rdx);                          // check if a slot has been found
3582   __ jccb(Assembler::notZero, allocated);        // if found, continue with that one
3583 
3584   // allocate one if there's no free slot
3585   { Label entry, loop;
3586     // 1. compute new pointers                   // rsp: old expression stack top
3587     __ movptr(rdx, monitor_block_bot);           // rdx: old expression stack bottom
3588     __ subptr(rsp, entry_size);                  // move expression stack top
3589     __ subptr(rdx, entry_size);                  // move expression stack bottom
3590     __ mov(rcx, rsp);                            // set start value for copy loop
3591     __ movptr(monitor_block_bot, rdx);           // set new monitor block top
3592     __ jmp(entry);
3593     // 2. move expression stack contents
3594     __ bind(loop);
3595     __ movptr(rbx, Address(rcx, entry_size));    // load expression stack word from old location
3596     __ movptr(Address(rcx, 0), rbx);             // and store it at new location
3597     __ addptr(rcx, wordSize);                    // advance to next word
3598     __ bind(entry);
3599     __ cmpptr(rcx, rdx);                         // check if bottom reached
3600     __ jcc(Assembler::notEqual, loop);           // if not at bottom then copy next word
3601   }
3602 
3603   // call run-time routine
3604   // rdx: points to monitor entry
3605   __ bind(allocated);
3606 
3607   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly.
3608   // The object has already been poped from the stack, so the expression stack looks correct.
3609   __ increment(rsi);
3610 
3611   __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax);     // store object
3612   __ lock_object(rdx);
3613 
3614   // check to make sure this monitor doesn't cause stack overflow after locking
3615   __ save_bcp();  // in case of exception
3616   __ generate_stack_overflow_check(0);
3617 
3618   // The bcp has already been incremented. Just need to dispatch to next instruction.
3619   __ dispatch_next(vtos);
3620 }
3621 
3622 
3623 void TemplateTable::monitorexit() {
3624   transition(atos, vtos);
3625 
3626   // check for NULL object
3627   __ null_check(rax);
3628 
3629   const Address monitor_block_top(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize);
3630   const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset        * wordSize);
3631   const int entry_size =         (     frame::interpreter_frame_monitor_size()           * wordSize);
3632   Label found;
3633 
3634   // find matching slot
3635   { Label entry, loop;
3636     __ movptr(rdx, monitor_block_top);           // points to current entry, starting with top-most entry
3637     __ lea(rbx, monitor_block_bot);             // points to word before bottom of monitor block
3638     __ jmpb(entry);
3639 
3640     __ bind(loop);
3641     __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));   // check if current entry is for same object
3642     __ jcc(Assembler::equal, found);             // if same object then stop searching
3643     __ addptr(rdx, entry_size);                  // otherwise advance to next entry
3644     __ bind(entry);
3645     __ cmpptr(rdx, rbx);                         // check if bottom reached
3646     __ jcc(Assembler::notEqual, loop);           // if not at bottom then check this entry
3647   }
3648 
3649   // error handling. Unlocking was not block-structured
3650   Label end;
3651   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
3652   __ should_not_reach_here();
3653 
3654   // call run-time routine
3655   // rcx: points to monitor entry
3656   __ bind(found);
3657   __ push_ptr(rax);                                 // make sure object is on stack (contract with oopMaps)
3658   __ unlock_object(rdx);
3659   __ pop_ptr(rax);                                  // discard object
3660   __ bind(end);
3661 }
3662 
3663 
3664 //----------------------------------------------------------------------------------------------------
3665 // Wide instructions
3666 
3667 void TemplateTable::wide() {
3668   transition(vtos, vtos);
3669   __ load_unsigned_byte(rbx, at_bcp(1));
3670   ExternalAddress wtable((address)Interpreter::_wentry_point);
3671   __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
3672   // Note: the rsi increment step is part of the individual wide bytecode implementations
3673 }
3674 
3675 
3676 //----------------------------------------------------------------------------------------------------
3677 // Multi arrays
3678 
3679 void TemplateTable::multianewarray() {
3680   transition(vtos, atos);
3681   __ load_unsigned_byte(rax, at_bcp(3)); // get number of dimensions
3682   // last dim is on top of stack; we want address of first one:
3683   // first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize
3684   // the latter wordSize to point to the beginning of the array.
3685   __ lea(  rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
3686   call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax);     // pass in rax,
3687   __ load_unsigned_byte(rbx, at_bcp(3));
3688   __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));  // get rid of counts
3689 }
3690 
3691 #endif /* !CC_INTERP */