/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/interp_masm.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers

// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants can be used at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp1, Rtmp2, Rtmp3
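// Note on 'precise': with precise card marking the card for the exact
// element address is dirtied (required for array stores), while imprecise
// marking may dirty only the card of the object start (sufficient for
// instance fields).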
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,         // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patch the current bytecode (pointed to by R14_bcp) in the bytecode
// stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
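      // The code to test lives in byte (1 + byte_no) of the 8-byte indices
      // field, counting from the least significant byte; on a big-endian
      // machine that byte sits at offset 7 - (1 + byte_no) from the start
      // of the field, hence the two variants below.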
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size);  // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch);
  __ cmpdi(CCR0, R17_tos, 0);
  __ bne(CCR0, resolved);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);

  __ align(32, 12);
  __ bind(resolved);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also, ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align doubles in the constant pool. SG, 11/7/97
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // If _iload, wait to rewrite to iload2. We only want to rewrite the
    // last two iloads in a pair. Comparing against fast_iload means that
    // the next bytecode is neither an iload nor a caload, and therefore
    // we have an iload pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable of type long from the locals area into the TOS cache register.
// The local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because R14_bcp points to the wide prefix code.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: array element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0, which doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values (value, index, array) from the expression stack and store the value.
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31;    // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp);  // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);  // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize,     R15_esp);  // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp);  // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp);  // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp);  // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);
  // stack: ..., a, b

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // Rscratch = first operand (popped), R17_tos = second operand.
  // For the shift operations: R17_tos = number of bits to shift, Rscratch = value to shift.
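  // In the shift cases below, the rldicl clears all but the low 5 bits of
  // the shift count first, giving Java's 'count & 0x1f' semantics for
  // 32-bit shifts.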
  switch (op) {
    case  add:   __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:   __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:   __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and:  __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:   __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor:  __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:   __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:   __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default:     ShouldNotReachHere();
  }
}

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:   __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:   __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and:  __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:   __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor:  __ xorr(R17_tos, Rscratch, R17_tos); break;
    default:     ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

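  // Divisor is in R17_tos. divisor + 1, compared unsigned against 2, is
  // at most 2 exactly when the divisor is -1, 0 or 1; these cases
  // (minint/-1 overflow, division by zero, trivial result) are handled
  // off the common path.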
  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}

void TemplateTable::irem() {
  transition(itos, itos);

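  // Remainder = dividend - (dividend / divisor) * divisor. The divisor is
  // saved in R12_scratch2 across the idiv; idiv leaves the quotient in
  // R17_tos and the dividend in R11_scratch1.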
  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

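  // Same divisor + 1 trick as in idiv above: an unsigned result <= 2
  // means the divisor is -1, 0 or 1 and needs special handling.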
  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minlong/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

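  // As in irem: remainder = dividend - (dividend / divisor) * divisor,
  // with the divisor saved in R12_scratch2 and the dividend left in
  // R11_scratch1 by ldiv.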
  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract the 6 least significant bits (shift count mod 64).
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract the 6 least significant bits (shift count mod 64).
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract the 6 least significant bits (shift count mod 64).
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);              // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp);    // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8); // Zero-extend to 16 bits (Java char).
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
      // Fall through to _l2d: the sign-extended int is converted like a long.
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids needs Power7 or later.
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids needs Power7 or later.
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NaN
      __ bso(CCR0, done);
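      // fctiwz rounds toward zero and saturates out-of-range values to
      // Integer.MIN_VALUE/MAX_VALUE, matching Java's (int) conversion
      // semantics; the NaN case was already routed to 'done' above.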
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NaN
      __ bso(CCR0, done);
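      // Likewise, fctidz saturates to Long.MIN_VALUE/MAX_VALUE for
      // out-of-range inputs, as required for Java's (long) conversion.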
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
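  // After mfcr, CR0's LT and GT flags sit in the two topmost bits of the
  // low word: srwi leaves (LT << 1) | GT, srawi replicates LT into -1 or 0,
  // and OR-ing the two yields the required -1 / 0 / 1.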
}

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}

// A branch_conditional variant that takes a TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
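  // Negative conditions are implemented by testing the complementary CR bit:
  // 'positive != invert' selects branch-on-bit-set vs. branch-on-bit-clear.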
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}
1572 
1573 void TemplateTable::branch(bool is_jsr, bool is_wide) {
1574 
1575   // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
1576   __ verify_thread();
1577 
1578   const Register Rscratch1    = R11_scratch1,
1579                  Rscratch2    = R12_scratch2,
1580                  Rscratch3    = R3_ARG1,
1581                  R4_counters  = R4_ARG2,
1582                  bumped_count = R31,
1583                  Rdisp        = R22_tmp2;
1584 
1585   __ profile_taken_branch(Rscratch1, bumped_count);
1586 
1587   // Get (wide) offset.
1588   if (is_wide) {
1589     __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
1590   } else {
1591     __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
1592   }
1593 
1594   // --------------------------------------------------------------------------
1595   // Handle all the JSR stuff here, then exit.
1596 // It's much shorter and cleaner than intermingling it with the
1597 // non-JSR normal-branch handling below.
1598   if (is_jsr) {
1599     // Compute return address as bci in Otos_i.
1600     __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1601     __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
1602     __ subf(R17_tos, Rscratch1, Rscratch2);
1603 
1604     // Bump bcp to target of JSR.
1605     __ add(R14_bcp, Rdisp, R14_bcp);
1606     // Push returnAddress for "ret" on stack.
1607     __ push_ptr(R17_tos);
1608     // And away we go!
1609     __ dispatch_next(vtos);
1610     return;
1611   }
1612 
1613   // --------------------------------------------------------------------------
1614   // Normal (non-jsr) branch handling
1615 
1616   const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
1617   if (increment_invocation_counter_for_backward_branches) {
1619 
1620     Label Lforward;
1621     __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
1622 
1623     // Check branch direction.
1624     __ cmpdi(CCR0, Rdisp, 0);
1625     __ bgt(CCR0, Lforward);
1626 
1627     __ get_method_counters(R19_method, R4_counters, Lforward);
1628 
1629     if (TieredCompilation) {
1630       Label Lno_mdo, Loverflow;
1631       const int increment = InvocationCounter::count_increment;
1632       const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
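           // Sketch of the overflow check emitted below (illustration only):
           //   counter += increment;                     // bump the count field
           //   if ((counter & mask) == 0) goto overflow; // every 2^Tier0BackedgeNotifyFreqLog bumps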
1633       if (ProfileInterpreter) {
1634         Register Rmdo = Rscratch1;
1635 
1636         // If no method data exists, go to profile_continue.
1637         __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
1638         __ cmpdi(CCR0, Rmdo, 0);
1639         __ beq(CCR0, Lno_mdo);
1640 
1641         // Increment backedge counter in the MDO.
1642         const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
1643         __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
1644         __ load_const_optimized(Rscratch3, mask, R0);
1645         __ addi(Rscratch2, Rscratch2, increment);
1646         __ stw(Rscratch2, mdo_bc_offs, Rmdo);
1647         __ and_(Rscratch3, Rscratch2, Rscratch3);
1648         __ bne(CCR0, Lforward);
1649         __ b(Loverflow);
1650       }
1651 
1652       // If there's no MDO, increment counter in method.
1653       const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
1654       __ bind(Lno_mdo);
1655       __ lwz(Rscratch2, mo_bc_offs, R4_counters);
1656       __ load_const_optimized(Rscratch3, mask, R0);
1657       __ addi(Rscratch2, Rscratch2, increment);
1658       __ stw(Rscratch2, mo_bc_offs, R4_counters);
1659       __ and_(Rscratch3, Rscratch2, Rscratch3);
1660       __ bne(CCR0, Lforward);
1661 
1662       __ bind(Loverflow);
1663 
1664       // Notify point for loop, pass branch bytecode.
1665       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);
1666 
1667       // Was an OSR adapter generated?
1668       // R3_RET = osr nmethod
1669       __ cmpdi(CCR0, R3_RET, 0);
1670       __ beq(CCR0, Lforward);
1671 
1672       // Has the nmethod been invalidated already?
1673       __ lbz(R0, nmethod::state_offset(), R3_RET);
1674       __ cmpwi(CCR0, R0, nmethod::in_use);
1675       __ bne(CCR0, Lforward);
1676 
1677       // Migrate the interpreter frame off of the stack.
1678       // We can use all registers because we will not return to interpreter from this point.
1679 
1680       // Save nmethod.
1681       const Register osr_nmethod = R31;
1682       __ mr(osr_nmethod, R3_RET);
1683       __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
1684       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
1685       __ reset_last_Java_frame();
1686       // OSR buffer is in ARG1.
1687 
1688       // Remove the interpreter frame.
1689       __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
1690 
1691       // Jump to the osr code.
1692       __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
1693       __ mtlr(R0);
1694       __ mtctr(R11_scratch1);
1695       __ bctr();
1696 
1697     } else {
1698 
1699       const Register invoke_ctr = Rscratch1;
1700       // Update Backedge branch separately from invocations.
1701       __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);
1702 
1703       if (ProfileInterpreter) {
1704         __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
1705         if (UseOnStackReplacement) {
1706           __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
1707         }
1708       } else {
1709         if (UseOnStackReplacement) {
1710           __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
1711         }
1712       }
1713     }
1714 
1715     __ bind(Lforward);
1716 
1717   } else {
1718     // Bump bytecode pointer by displacement (take the branch).
1719     __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
1720   }
1721   // Continue with bytecode @ target.
1722   // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
1723   // %%%%% and changing dispatch_next to dispatch_only.
1724   __ dispatch_next(vtos);
1725 }
1726 
1727 // Helper function for if_cmp* methods below.
1728 // Factored out common compare and branch code.
1729 void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
1730   Label Lnot_taken;
1731   // Note: cc is the condition under which the bytecode branch is *taken*.
1732   // branch_conditional with invert == true jumps to Lnot_taken when cc does NOT hold.
1733 
1734   if (is_jint) {
1735     if (cmp0) {
1736       __ cmpwi(CCR0, Rfirst, 0);
1737     } else {
1738       __ cmpw(CCR0, Rfirst, Rsecond);
1739     }
1740   } else {
1741     if (cmp0) {
1742       __ cmpdi(CCR0, Rfirst, 0);
1743     } else {
1744       __ cmpd(CCR0, Rfirst, Rsecond);
1745     }
1746   }
1747   branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);
1748 
1749   // Condition holds => take the bytecode branch!
1750   branch(false, false);
1751 
1752   // Condition does not hold => continue with the next bytecode.
1753   __ align(32, 12);
1754   __ bind(Lnot_taken);
1755   __ profile_not_taken_branch(Rscratch1, Rscratch2);
1756 }
1757 
1758 // Compare integer values with zero and fall through if CC holds, branch away otherwise.
1759 void TemplateTable::if_0cmp(Condition cc) {
1760   transition(itos, vtos);
1761 
1762   if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
1763 }
1764 
1765 // Compare integer values and fall through if CC holds, branch away otherwise.
1766 //
1767 // Interface:
1768 //  - Rfirst: First operand  (older stack value)
1769 //  - tos:    Second operand (younger stack value)
1770 void TemplateTable::if_icmp(Condition cc) {
1771   transition(itos, vtos);
1772 
1773   const Register Rfirst  = R0,
1774                  Rsecond = R17_tos;
1775 
1776   __ pop_i(Rfirst);
1777   if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
1778 }
1779 
1780 void TemplateTable::if_nullcmp(Condition cc) {
1781   transition(atos, vtos);
1782 
1783   if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
1784 }
1785 
1786 void TemplateTable::if_acmp(Condition cc) {
1787   transition(atos, vtos);
1788 
1789   const Register Rfirst  = R0,
1790                  Rsecond = R17_tos;
1791 
1792   __ pop_ptr(Rfirst);
1793   if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
1794 }
1795 
1796 void TemplateTable::ret() {
1797   locals_index(R11_scratch1);
1798   __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);
1799 
1800   __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);
1801 
1802   __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
1803   __ add(R11_scratch1, R17_tos, R11_scratch1);
1804   __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
1805   __ dispatch_next(vtos);
1806 }
1807 
1808 void TemplateTable::wide_ret() {
1809   transition(vtos, vtos);
1810 
1811   const Register Rindex = R3_ARG1,
1812                  Rscratch1 = R11_scratch1,
1813                  Rscratch2 = R12_scratch2;
1814 
1815   locals_index_wide(Rindex);
1816   __ load_local_ptr(R17_tos, R17_tos, Rindex);
1817   __ profile_ret(vtos, R17_tos, Rscratch1, Rscratch2);
1818   // Tos now contains the bci, compute the bcp from that.
1819   __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1820   __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
1821   __ add(R14_bcp, Rscratch1, Rscratch2);
1822   __ dispatch_next(vtos);
1823 }
1824 
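     // Table switch over a contiguous [low, high] range of keys.
     // C sketch of the dispatch below (illustration only):
     //   if (key < low || key > high) offset = default_offset;
     //   else                         offset = table[key - low];
     //   bcp += offset;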
1825 void TemplateTable::tableswitch() {
1826   transition(itos, vtos);
1827 
1828   Label Ldispatch, Ldefault_case;
1829   Register Rlow_byte         = R3_ARG1,
1830            Rindex            = Rlow_byte,
1831            Rhigh_byte        = R4_ARG2,
1832            Rdef_offset_addr  = R5_ARG3, // is going to contain address of default offset
1833            Rscratch1         = R11_scratch1,
1834            Rscratch2         = R12_scratch2,
1835            Roffset           = R6_ARG4;
1836 
1837   // Align bcp.
1838   __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1839   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
1840 
1841   // Load lo & hi.
1842   __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
1843   __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
1844 
1845   // Check for default case (=index outside [low,high]).
1846   __ cmpw(CCR0, R17_tos, Rlow_byte);
1847   __ cmpw(CCR1, R17_tos, Rhigh_byte);
1848   __ blt(CCR0, Ldefault_case);
1849   __ bgt(CCR1, Ldefault_case);
1850 
1851   // Lookup dispatch offset.
1852   __ sub(Rindex, R17_tos, Rlow_byte);
1853   __ extsw(Rindex, Rindex);
1854   __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
1855   __ sldi(Rindex, Rindex, LogBytesPerInt);
1856   __ addi(Rindex, Rindex, 3 * BytesPerInt);
1857 #if defined(VM_LITTLE_ENDIAN)
1858   __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
1859   __ extsw(Roffset, Roffset);
1860 #else
1861   __ lwax(Roffset, Rdef_offset_addr, Rindex);
1862 #endif
1863   __ b(Ldispatch);
1864 
1865   __ bind(Ldefault_case);
1866   __ profile_switch_default(Rhigh_byte, Rscratch1);
1867   __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
1868 
1869   __ bind(Ldispatch);
1870 
1871   __ add(R14_bcp, Roffset, R14_bcp);
1872   __ dispatch_next(vtos);
1873 }
1874 
1875 void TemplateTable::lookupswitch() {
1876   transition(itos, itos);
1877   __ stop("lookupswitch bytecode should have been rewritten");
1878 }
1879 
1880 // Table switch using linear search through cases.
1881 // Bytecode stream format:
1882 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
1883 // Note: Everything is big-endian format here.
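     // C sketch of the search below (illustration only):
     //   int offset = default_offset;
     //   for (int i = 0; i < count; i++) {
     //     if (pairs[i].match == key) { offset = pairs[i].offset; break; }
     //   }
     //   bcp += offset;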
1884 void TemplateTable::fast_linearswitch() {
1885   transition(itos, vtos);
1886 
1887   Label Lloop_entry, Lsearch_loop, Lcontinue_execution, Ldefault_case;
1888   Register Rcount           = R3_ARG1,
1889            Rcurrent_pair    = R4_ARG2,
1890            Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
1891            Roffset          = R31,     // Might need to survive C call.
1892            Rvalue           = R12_scratch2,
1893            Rscratch         = R11_scratch1,
1894            Rcmp_value       = R17_tos;
1895 
1896   // Align bcp.
1897   __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1898   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
1899 
1900   // Set up loop counter and limit.
1901   __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
1902   __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
1903 
1904   __ mtctr(Rcount);
1905   __ cmpwi(CCR0, Rcount, 0);
1906   __ bne(CCR0, Lloop_entry);
1907 
1908   // Default case
1909   __ bind(Ldefault_case);
1910   __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
1911   if (ProfileInterpreter) {
1912     __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
1913   }
1914   __ b(Lcontinue_execution);
1915 
1916   // Next iteration
1917   __ bind(Lsearch_loop);
1918   __ bdz(Ldefault_case);
1919   __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
1920   __ bind(Lloop_entry);
1921   __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
1922   __ cmpw(CCR0, Rvalue, Rcmp_value);
1923   __ bne(CCR0, Lsearch_loop);
1924 
1925   // Found, load offset.
1926   __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
1927   // Calculate case index and profile
1928   __ mfctr(Rcurrent_pair);
1929   if (ProfileInterpreter) {
1930     __ sub(Rcurrent_pair, Rcount, Rcurrent_pair);
1931     __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
1932   }
1933 
1934   __ bind(Lcontinue_execution);
1935   __ add(R14_bcp, Roffset, R14_bcp);
1936   __ dispatch_next(vtos);
1937 }
1938 
1939 // Table switch using binary search (value/offset pairs are ordered).
1940 // Bytecode stream format:
1941 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
1942 // Note: Everything is big-endian format here, so on little-endian machines we have to byte-reverse the offset, the count, and the compare value.
1943 void TemplateTable::fast_binaryswitch() {
1944 
1945   transition(itos, vtos);
1946   // Implementation using the following core algorithm: (copied from Intel)
1947   //
1948   // int binary_search(int key, LookupswitchPair* array, int n) {
1949   //   // Binary search according to "Methodik des Programmierens" by
1950   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1951   //   int i = 0;
1952   //   int j = n;
1953   //   while (i+1 < j) {
1954   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1955   //     // with      Q: for all i: 0 <= i < n: key < a[i]
1956   //     // where a stands for the array and assuming that the (nonexistent)
1957   //     // element a[n] is infinitely big.
1958   //     int h = (i + j) >> 1;
1959   //     // i < h < j
1960   //     if (key < array[h].fast_match()) {
1961   //       j = h;
1962   //     } else {
1963   //       i = h;
1964   //     }
1965   //   }
1966   //   // R: a[i] <= key < a[i+1] or Q
1967   //   // (i.e., if key is within array, i is the correct index)
1968   //   return i;
1969   // }
1970 
1971   // register allocation
1972   const Register Rkey     = R17_tos;          // already set (tosca)
1973   const Register Rarray   = R3_ARG1;
1974   const Register Ri       = R4_ARG2;
1975   const Register Rj       = R5_ARG3;
1976   const Register Rh       = R6_ARG4;
1977   const Register Rscratch = R11_scratch1;
1978 
1979   const int log_entry_size = 3;
1980   const int entry_size = 1 << log_entry_size;
1981 
1982   Label found;
1983 
1984   // Find array start.
1985   __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
1986   __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
1987 
1988   // initialize i & j
1989   __ li(Ri, 0);
1990   __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
1991 
1992   // and start.
1993   Label entry;
1994   __ b(entry);
1995 
1996   // binary search loop
1997   { Label loop;
1998     __ bind(loop);
1999     // int h = (i + j) >> 1;
2000     __ srdi(Rh, Rh, 1);
2001     // if (key < array[h].fast_match()) {
2002     //   j = h;
2003     // } else {
2004     //   i = h;
2005     // }
2006     __ sldi(Rscratch, Rh, log_entry_size);
2007 #if defined(VM_LITTLE_ENDIAN)
2008     __ lwbrx(Rscratch, Rscratch, Rarray);
2009 #else
2010     __ lwzx(Rscratch, Rscratch, Rarray);
2011 #endif
2012 
2013     // if (key < current value)
2014     //   j = h
2015     // else
2016     //   i = h
2017     Label Lgreater;
2018     __ cmpw(CCR0, Rkey, Rscratch);
2019     __ bge(CCR0, Lgreater);
2020     __ mr(Rj, Rh);
2021     __ b(entry);
2022     __ bind(Lgreater);
2023     __ mr(Ri, Rh);
2024 
2025     // while (i+1 < j)
2026     __ bind(entry);
2027     __ addi(Rscratch, Ri, 1);
2028     __ cmpw(CCR0, Rscratch, Rj);
2029     __ add(Rh, Ri, Rj); // Precompute i + j; the loop head shifts it to h = (i + j) >> 1.
2030 
2031     __ blt(CCR0, loop);
2032   }
2033 
2034   // End of binary search, result index is i (must check again!).
2035   Label default_case;
2036   Label continue_execution;
2037   if (ProfileInterpreter) {
2038     __ mr(Rh, Ri);              // Save index in i for profiling.
2039   }
2040   // Ri = value offset
2041   __ sldi(Ri, Ri, log_entry_size);
2042   __ add(Ri, Ri, Rarray);
2043   __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
2044 
2045   // Compare the key against the loaded match value
2046   // (Rscratch = array[i].match; the offset lives at Ri + BytesPerInt).
2047   __ cmpw(CCR0, Rkey, Rscratch);
2048   __ beq(CCR0, found);
2049   // Entry not found -> j = default offset.
2050   __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
2051   __ b(default_case);
2052 
2053   __ bind(found);
2054   // Entry found -> j = offset.
2055   __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
2056   __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
2057 
2058   if (ProfileInterpreter) {
2059     __ b(continue_execution);
2060   }
2061 
2062   __ bind(default_case); // fall through (if not profiling)
2063   __ profile_switch_default(Ri, Rscratch);
2064 
2065   __ bind(continue_execution);
2066 
2067   __ extsw(Rj, Rj);
2068   __ add(R14_bcp, Rj, R14_bcp);
2069   __ dispatch_next(vtos);
2070 }
2071 
2072 void TemplateTable::_return(TosState state) {
2073   transition(state, state);
2074   assert(_desc->calls_vm(),
2075          "inconsistent calls_vm information"); // call in remove_activation
2076 
2077   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2078 
2079     Register Rscratch     = R11_scratch1,
2080              Rklass       = R12_scratch2,
2081              Rklass_flags = Rklass;
2082     Label Lskip_register_finalizer;
2083 
2084     // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
2085     assert(state == vtos, "only valid state");
2086     __ ld(R17_tos, 0, R18_locals);
2087 
2088     // Load klass of this obj.
2089     __ load_klass(Rklass, R17_tos);
2090     __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
2091     __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
2092     __ bfalse(CCR0, Lskip_register_finalizer);
2093 
2094     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
2095 
2096     __ align(32, 12);
2097     __ bind(Lskip_register_finalizer);
2098   }
2099 
2100   // Move the result value into the correct register and remove memory stack frame.
2101   __ remove_activation(state, /* throw_monitor_exception */ true);
2102   // Restoration of lr done by remove_activation.
2103   switch (state) {
2104     case ltos:
2105     case btos:
2106     case ctos:
2107     case stos:
2108     case atos:
2109     case itos: __ mr(R3_RET, R17_tos); break;
2110     case ftos:
2111     case dtos: __ fmr(F1_RET, F15_ftos); break;
2112     case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
2113                // to get visible before the reference to the object gets stored anywhere.
2114                __ membar(Assembler::StoreStore); break;
2115     default  : ShouldNotReachHere();
2116   }
2117   __ blr();
2118 }
2119 
2120 // ============================================================================
2121 // Constant pool cache access
2122 //
2123 // Memory ordering:
2124 //
2125 // As done in the C++ interpreter, we load the fields
2126 //   - _indices
2127 //   - _f12_oop
2128 // with acquire semantics, because they are checked to decide whether the
2129 // cache entry is already resolved. We don't want loads to float above this check.
2130 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2131 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
2132 
2133 // Call into the VM if call site is not yet resolved
2134 //
2135 // Input regs:
2136 //   - None, all passed regs are outputs.
2137 //
2138 // Returns:
2139 //   - Rcache: The const pool cache entry that contains the resolved result;
2140 //             the callers load f1/f2 from it as needed.
2141 //
2142 // Kills:
2143 //   - Rscratch
2144 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2145 
2146   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2147   Label Lresolved, Ldone;
2148 
2149   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
2150   // We are resolved if the indices field contains the current bytecode.
2151 #if defined(VM_LITTLE_ENDIAN)
2152   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2153 #else
2154   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2155 #endif
2156   // Acquire by cmp-br-isync (see below).
2157   __ cmpdi(CCR0, Rscratch, (int)bytecode());
2158   __ beq(CCR0, Lresolved);
2159 
2160   address entry = NULL;
2161   switch (bytecode()) {
2162     case Bytecodes::_getstatic      : // fall through
2163     case Bytecodes::_putstatic      : // fall through
2164     case Bytecodes::_getfield       : // fall through
2165     case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2166     case Bytecodes::_invokevirtual  : // fall through
2167     case Bytecodes::_invokespecial  : // fall through
2168     case Bytecodes::_invokestatic   : // fall through
2169     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2170     case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2171     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2172     default                         : ShouldNotReachHere(); break;
2173   }
2174   __ li(R4_ARG2, (int)bytecode());
2175   __ call_VM(noreg, entry, R4_ARG2, true);
2176 
2177   // Update registers with resolved info.
2178   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2179   __ b(Ldone);
2180 
2181   __ bind(Lresolved);
2182   __ isync(); // Order load wrt. succeeding loads.
2183   __ bind(Ldone);
2184 }
2185 
2186 // Load the constant pool cache entry at field accesses into registers.
2187 // The Rcache and Rindex registers must be set before call.
2188 // Input:
2189 //   - Rcache, Rindex
2190 // Output:
2191 //   - Robj, Roffset, Rflags
2192 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2193                                               Register Rcache,
2194                                               Register Rindex /* unused on PPC64 */,
2195                                               Register Roffset,
2196                                               Register Rflags,
2197                                               bool is_static = false) {
2198   assert_different_registers(Rcache, Rflags, Roffset);
2199   // assert(Rindex == noreg, "parameter not used on PPC64");
2200 
2201   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2202   __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
2203   __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
2204   if (is_static) {
2205     __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
2206     __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
2207     // Acquire not needed here. Following access has an address dependency on this value.
2208   }
2209 }
2210 
2211 // Load the constant pool cache entry at invokes into registers.
2212 // Resolve if necessary.
2213 
2214 // Input Registers:
2215 //   - None, bcp is used, though
2216 //
2217 // Return registers:
2218 //   - Rmethod       (f1 field or f2 if invokevirtual)
2219 //   - Ritable_index (f2 field)
2220 //   - Rflags        (flags field)
2221 //
2222 // Kills:
2223 //   - R21
2224 //
2225 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2226                                                Register Rmethod,
2227                                                Register Ritable_index,
2228                                                Register Rflags,
2229                                                bool is_invokevirtual,
2230                                                bool is_invokevfinal,
2231                                                bool is_invokedynamic) {
2232 
2233   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2234   // Determine constant pool cache field offsets.
2235   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2236   const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
2237   const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
2238   // Access constant pool cache fields.
2239   const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
2240 
2241   Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
2242 
2243   if (is_invokevfinal) {
2244     assert(Ritable_index == noreg, "register not used");
2245     // Already resolved.
2246     __ get_cache_and_index_at_bcp(Rcache, 1);
2247   } else {
2248     resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
2249   }
2250 
2251   __ ld(Rmethod, method_offset, Rcache);
2252   __ ld(Rflags, flags_offset, Rcache);
2253 
2254   if (Ritable_index != noreg) {
2255     __ ld(Ritable_index, index_offset, Rcache);
2256   }
2257 }
2258 
2259 // ============================================================================
2260 // Field access
2261 
2262 // Volatile variables demand their effects be made known to all CPUs
2263 // in order. Store buffers on most chips allow reads & writes to
2264 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2265 // without some kind of memory barrier (i.e., it's not sufficient that
2266 // the interpreter does not reorder volatile references, the hardware
2267 // also must not reorder them).
2268 //
2269 // According to the new Java Memory Model (JMM):
2270 // (1) All volatiles are serialized wrt each other. ALSO reads &
2271 //     writes act as acquire & release, so:
2272 // (2) A read cannot let unrelated NON-volatile memory refs that
2273 //     happen after the read float up to before the read. It's OK for
2274 //     non-volatile memory refs that happen before the volatile read to
2275 //     float down below it.
2276 // (3) Similarly, a volatile write cannot let unrelated NON-volatile
2277 //     memory refs that happen BEFORE the write float down to after the
2278 //     write. It's OK for non-volatile memory refs that happen after the
2279 //     volatile write to float up before it.
2280 //
2281 // We only put in barriers around volatile refs (they are expensive),
2282 // not _between_ memory refs (that would require us to track the
2283 // flavor of the previous memory refs). Requirements (2) and (3)
2284 // require some barriers before volatile stores and after volatile
2285 // loads. These nearly cover requirement (1) but miss the
2286 // volatile-store-volatile-load case.  This final case is placed after
2287 // volatile-stores although it could just as well go before
2288 // volatile-loads.
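     // On PPC64 the code below realizes this roughly as (sketch; fence()
     // emits sync, release() emits lwsync, acquire is done via twi/isync):
     //   volatile load:  (sync;) load; twi; isync
     //   volatile store: lwsync; store; (sync)
     // Whether the sync goes before volatile loads or after volatile stores
     // depends on support_IRIW_for_not_multiple_copy_atomic_cpu.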
2289 
2290 // The registers cache and index expected to be set before call.
2291 // Correct values of the cache and index registers are preserved.
2292 // Kills:
2293 //   Rcache (if has_tos)
2294 //   Rscratch
2295 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
2296 
2297   assert_different_registers(Rcache, Rscratch);
2298 
2299   if (JvmtiExport::can_post_field_access()) {
2300     ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2301     Label Lno_field_access_post;
2302 
2303     // Check if post field access is enabled.
2304     int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
2305     __ lwz(Rscratch, offs, Rscratch);
2306 
2307     __ cmpwi(CCR0, Rscratch, 0);
2308     __ beq(CCR0, Lno_field_access_post);
2309 
2310     // Post access enabled - do it!
2311     __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2312     if (is_static) {
2313       __ li(R17_tos, 0);
2314     } else {
2315       if (has_tos) {
2316         // The fast bytecode versions have the obj ptr in a register.
2317         // Thus, save the object pointer before call_VM() clobbers it:
2318         // put the object on the tos where GC wants it.
2319         __ push_ptr(R17_tos);
2320       } else {
2321         // Load top of stack (do not pop the value off the stack).
2322         __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
2323       }
2324       __ verify_oop(R17_tos);
2325     }
2326     // tos:   object pointer or NULL if static
2327     // cache: cache entry pointer
2328     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
2329     if (!is_static && has_tos) {
2330       // Restore object pointer.
2331       __ pop_ptr(R17_tos);
2332       __ verify_oop(R17_tos);
2333     } else {
2334       // Cache is still needed to get class or obj.
2335       __ get_cache_and_index_at_bcp(Rcache, 1);
2336     }
2337 
2338     __ align(32, 12);
2339     __ bind(Lno_field_access_post);
2340   }
2341 }
2342 
2343 // kills R11_scratch1
2344 void TemplateTable::pop_and_check_object(Register Roop) {
2345   Register Rtmp = R11_scratch1;
2346 
2347   assert_different_registers(Rtmp, Roop);
2348   __ pop_ptr(Roop);
2349   // For field access must check obj.
2350   __ null_check_throw(Roop, -1, Rtmp);
2351   __ verify_oop(Roop);
2352 }
2353 
2354 // PPC64: implement volatile loads as fence-load-acquire.
2355 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2356   transition(vtos, vtos);
2357 
2358   Label Lacquire, Lisync;
2359 
2360   const Register Rcache        = R3_ARG1,
2361                  Rclass_or_obj = R22_tmp2,
2362                  Roffset       = R23_tmp3,
2363                  Rflags        = R31,
2364                  Rbtable       = R5_ARG3,
2365                  Rbc           = R6_ARG4,
2366                  Rscratch      = R12_scratch2;
2367 
2368   static address field_branch_table[number_of_states],
2369                  static_branch_table[number_of_states];
2370 
2371   address* branch_table = is_static ? static_branch_table : field_branch_table;
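       // Dispatch sketch (illustration only): each tos state has a
       // non-volatile entry point in branch_table, directly preceded by a
       // one-instruction volatile variant (a fence):
       //   entry = branch_table[tos_state];
       //   if (is_volatile) entry -= BytesPerInstWord; // start at the fence
       //   goto *entry;
       // For loads the fence variant is only selected if
       // support_IRIW_for_not_multiple_copy_atomic_cpu; otherwise CCR6 is
       // checked after the load and acquire is done at Lacquire/Lisync.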
2372 
2373   // Get field offset.
2374   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2375 
2376   // JVMTI support
2377   jvmti_post_field_access(Rcache, Rscratch, is_static, false);
2378 
2379   // Load after possible GC.
2380   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2381 
2382   // Load pointer to branch table.
2383   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2384 
2385   // Get volatile flag.
2386   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
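       // Note (illustration): rldicl(dst, src, 64-n, 63) rotates bit n down
       // to position 0 and masks all other bits, i.e. dst = (src >> n) & 1.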
2387   // Note: sync is needed before volatile load on PPC64.
2388 
2389   // Check field type.
2390   __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2391 
2392 #ifdef ASSERT
2393   Label LFlagInvalid;
2394   __ cmpldi(CCR0, Rflags, number_of_states);
2395   __ bge(CCR0, LFlagInvalid);
2396 #endif
2397 
2398   // Load from branch table and dispatch (volatile case: one instruction ahead).
2399   __ sldi(Rflags, Rflags, LogBytesPerWord);
2400   __ cmpwi(CCR6, Rscratch, 1); // Volatile?
2401   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2402     __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2403   }
2404   __ ldx(Rbtable, Rbtable, Rflags);
2405 
2406   // Get the obj from stack.
2407   if (!is_static) {
2408     pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2409   } else {
2410     __ verify_oop(Rclass_or_obj);
2411   }
2412 
2413   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2414     __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2415   }
2416   __ mtctr(Rbtable);
2417   __ bctr();
2418 
2419 #ifdef ASSERT
2420   __ bind(LFlagInvalid);
2421   __ stop("got invalid flag", 0x654);
2422 
2423   // __ bind(Lvtos);
2424   address pc_before_fence = __ pc();
2425   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2426   assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2427   assert(branch_table[vtos] == 0, "can't compute twice");
2428   branch_table[vtos] = __ pc(); // non-volatile_entry point
2429   __ stop("vtos unexpected", 0x655);
2430 #endif
2431 
2432   __ align(32, 28, 28); // Align load.
2433   // __ bind(Ldtos);
2434   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2435   assert(branch_table[dtos] == 0, "can't compute twice");
2436   branch_table[dtos] = __ pc(); // non-volatile_entry point
2437   __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
2438   __ push(dtos);
2439   if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
2440   {
2441     Label acquire_double;
2442     __ beq(CCR6, acquire_double); // Volatile?
2443     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2444 
2445     __ bind(acquire_double);
2446     __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2447     __ beq_predict_taken(CCR0, Lisync);
2448     __ b(Lisync); // In case of NAN.
2449   }
2450 
2451   __ align(32, 28, 28); // Align load.
2452   // __ bind(Lftos);
2453   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2454   assert(branch_table[ftos] == 0, "can't compute twice");
2455   branch_table[ftos] = __ pc(); // non-volatile_entry point
2456   __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
2457   __ push(ftos);
2458   if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
2459   {
2460     Label acquire_float;
2461     __ beq(CCR6, acquire_float); // Volatile?
2462     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2463 
2464     __ bind(acquire_float);
2465     __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2466     __ beq_predict_taken(CCR0, Lisync);
2467     __ b(Lisync); // In case of NAN.
2468   }
2469 
2470   __ align(32, 28, 28); // Align load.
2471   // __ bind(Litos);
2472   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2473   assert(branch_table[itos] == 0, "can't compute twice");
2474   branch_table[itos] = __ pc(); // non-volatile_entry point
2475   __ lwax(R17_tos, Rclass_or_obj, Roffset);
2476   __ push(itos);
2477   if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
2478   __ beq(CCR6, Lacquire); // Volatile?
2479   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2480 
2481   __ align(32, 28, 28); // Align load.
2482   // __ bind(Lltos);
2483   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2484   assert(branch_table[ltos] == 0, "can't compute twice");
2485   branch_table[ltos] = __ pc(); // non-volatile_entry point
2486   __ ldx(R17_tos, Rclass_or_obj, Roffset);
2487   __ push(ltos);
2488   if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
2489   __ beq(CCR6, Lacquire); // Volatile?
2490   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2491 
2492   __ align(32, 28, 28); // Align load.
2493   // __ bind(Lbtos);
2494   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2495   assert(branch_table[btos] == 0, "can't compute twice");
2496   branch_table[btos] = __ pc(); // non-volatile_entry point
2497   __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2498   __ extsb(R17_tos, R17_tos);
2499   __ push(btos);
2500   if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
2501   __ beq(CCR6, Lacquire); // Volatile?
2502   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2503 
2504   __ align(32, 28, 28); // Align load.
2505   // __ bind(Lctos);
2506   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2507   assert(branch_table[ctos] == 0, "can't compute twice");
2508   branch_table[ctos] = __ pc(); // non-volatile_entry point
2509   __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2510   __ push(ctos);
2511   if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
2512   __ beq(CCR6, Lacquire); // Volatile?
2513   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2514 
2515   __ align(32, 28, 28); // Align load.
2516   // __ bind(Lstos);
2517   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2518   assert(branch_table[stos] == 0, "can't compute twice");
2519   branch_table[stos] = __ pc(); // non-volatile_entry point
2520   __ lhax(R17_tos, Rclass_or_obj, Roffset);
2521   __ push(stos);
2522   if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
2523   __ beq(CCR6, Lacquire); // Volatile?
2524   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2525 
2526   __ align(32, 28, 28); // Align load.
2527   // __ bind(Latos);
2528   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2529   assert(branch_table[atos] == 0, "can't compute twice");
2530   branch_table[atos] = __ pc(); // non-volatile_entry point
2531   __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2532   __ verify_oop(R17_tos);
2533   __ push(atos);
2534   //__ dcbt(R17_tos); // prefetch
2535   if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2536   __ beq(CCR6, Lacquire); // Volatile?
2537   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2538 
2539   __ align(32, 12);
2540   __ bind(Lacquire);
2541   __ twi_0(R17_tos);
2542   __ bind(Lisync);
2543   __ isync(); // acquire
2544 
2545 #ifdef ASSERT
2546   for (int i = 0; i<number_of_states; ++i) {
2547     assert(branch_table[i], "get initialization");
2548     //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2549     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2550   }
2551 #endif
2552 }
2553 
2554 void TemplateTable::getfield(int byte_no) {
2555   getfield_or_static(byte_no, false);
2556 }
2557 
2558 void TemplateTable::getstatic(int byte_no) {
2559   getfield_or_static(byte_no, true);
2560 }
2561 
2562 // The registers cache and index expected to be set before call.
2563 // The function may destroy various registers, just not the cache and index registers.
2564 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2565 
2566   assert_different_registers(Rcache, Rscratch, R6_ARG4);
2567 
2568   if (JvmtiExport::can_post_field_modification()) {
2569     Label Lno_field_mod_post;
2570 
2571     // Check if post field modification is enabled.
2572     int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2573     __ lwz(Rscratch, offs, Rscratch);
2574 
2575     __ cmpwi(CCR0, Rscratch, 0);
2576     __ beq(CCR0, Lno_field_mod_post);
2577 
2578     // Do the post
2579     ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2580     const Register Robj = Rscratch;
2581 
2582     __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2583     if (is_static) {
2584       // Life is simple. Null out the object pointer.
2585       __ li(Robj, 0);
2586     } else {
2587       // In case of the fast versions, value lives in registers => put it back on tos.
2588       int offs = Interpreter::expr_offset_in_bytes(0);
2589       Register base = R15_esp;
2590       switch(bytecode()) {
2591         case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break;
2592         case Bytecodes::_fast_iputfield: // Fall through
2593         case Bytecodes::_fast_bputfield: // Fall through
2594         case Bytecodes::_fast_cputfield: // Fall through
2595         case Bytecodes::_fast_sputfield: __ push_i(); offs+=  Interpreter::stackElementSize; break;
2596         case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break;
2597         case Bytecodes::_fast_fputfield: __ push_f(); offs+=  Interpreter::stackElementSize; break;
2598         case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break;
2599         default: {
2600           offs = 0;
2601           base = Robj;
2602           const Register Rflags = Robj;
2603           Label is_one_slot;
2604           // Life is harder. The stack holds the value on top, followed by the
2605           // object. We don't know the size of the value, though; it could be
2606           // one or two words depending on its type. As a result, we must find
2607           // the type to determine where the object is.
2608           __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
2609           __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2610 
2611           __ cmpwi(CCR0, Rflags, ltos);
2612           __ cmpwi(CCR1, Rflags, dtos);
2613           __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
2614           __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
2615           __ beq(CCR0, is_one_slot);
2616           __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
2617           __ bind(is_one_slot);
2618           break;
2619         }
2620       }
2621       __ ld(Robj, offs, base);
2622       __ verify_oop(Robj);
2623     }
2624 
2625     __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
2626     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
2627     __ get_cache_and_index_at_bcp(Rcache, 1);
2628 
2629     // In case of the fast versions, value lives in registers => put it back on tos.
2630     switch(bytecode()) {
2631       case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
2632       case Bytecodes::_fast_iputfield: // Fall through
2633       case Bytecodes::_fast_bputfield: // Fall through
2634       case Bytecodes::_fast_cputfield: // Fall through
2635       case Bytecodes::_fast_sputfield: __ pop_i(); break;
2636       case Bytecodes::_fast_lputfield: __ pop_l(); break;
2637       case Bytecodes::_fast_fputfield: __ pop_f(); break;
2638       case Bytecodes::_fast_dputfield: __ pop_d(); break;
2639       default: break; // Nothin' to do.
2640     }
2641 
2642     __ align(32, 12);
2643     __ bind(Lno_field_mod_post);
2644   }
2645 }
2646 
2647 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
2648 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2649   Label Lvolatile;
2650 
2651   const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2652                  Rclass_or_obj = R31,      // Needs to survive C call.
2653                  Roffset       = R22_tmp2, // Needs to survive C call.
2654                  Rflags        = R3_ARG1,
2655                  Rbtable       = R4_ARG2,
2656                  Rscratch      = R11_scratch1,
2657                  Rscratch2     = R12_scratch2,
2658                  Rscratch3     = R6_ARG4,
2659                  Rbc           = Rscratch3;
2660   const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2661 
2662   static address field_branch_table[number_of_states],
2663                  static_branch_table[number_of_states];
2664 
2665   address* branch_table = is_static ? static_branch_table : field_branch_table;
2666 
2667   // Stack (grows up):
2668   //  value
2669   //  obj
2670 
2671   // Load the field offset.
2672   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2673   jvmti_post_field_mod(Rcache, Rscratch, is_static);
2674   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2675 
2676   // Load pointer to branch table.
2677   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2678 
2679   // Get volatile flag.
2680   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2681 
2682   // Check the field type.
2683   __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2684 
2685 #ifdef ASSERT
2686   Label LFlagInvalid;
2687   __ cmpldi(CCR0, Rflags, number_of_states);
2688   __ bge(CCR0, LFlagInvalid);
2689 #endif
2690 
2691   // Load from branch table and dispatch (volatile case: one instruction ahead).
2692   __ sldi(Rflags, Rflags, LogBytesPerWord);
2693   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile?
2694   __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2695   __ ldx(Rbtable, Rbtable, Rflags);
2696 
2697   __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2698   __ mtctr(Rbtable);
2699   __ bctr();
2700 
2701 #ifdef ASSERT
2702   __ bind(LFlagInvalid);
2703   __ stop("got invalid flag", 0x656);
2704 
2705   // __ bind(Lvtos);
2706   address pc_before_release = __ pc();
2707   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2708   assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2709   assert(branch_table[vtos] == 0, "can't compute twice");
2710   branch_table[vtos] = __ pc(); // non-volatile_entry point
2711   __ stop("vtos unexpected", 0x657);
2712 #endif
2713 
2714   __ align(32, 28, 28); // Align pop.
2715   // __ bind(Ldtos);
2716   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2717   assert(branch_table[dtos] == 0, "can't compute twice");
2718   branch_table[dtos] = __ pc(); // non-volatile_entry point
2719   __ pop(dtos);
2720   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2721   __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2722   if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
2723   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2724     __ beq(CR_is_vol, Lvolatile); // Volatile?
2725   }
2726   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2727 
2728   __ align(32, 28, 28); // Align pop.
2729   // __ bind(Lftos);
2730   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2731   assert(branch_table[ftos] == 0, "can't compute twice");
2732   branch_table[ftos] = __ pc(); // non-volatile_entry point
2733   __ pop(ftos);
2734   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2735   __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2736   if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
2737   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2738     __ beq(CR_is_vol, Lvolatile); // Volatile?
2739   }
2740   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2741 
2742   __ align(32, 28, 28); // Align pop.
2743   // __ bind(Litos);
2744   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2745   assert(branch_table[itos] == 0, "can't compute twice");
2746   branch_table[itos] = __ pc(); // non-volatile_entry point
2747   __ pop(itos);
2748   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2749   __ stwx(R17_tos, Rclass_or_obj, Roffset);
2750   if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
2751   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2752     __ beq(CR_is_vol, Lvolatile); // Volatile?
2753   }
2754   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2755 
2756   __ align(32, 28, 28); // Align pop.
2757   // __ bind(Lltos);
2758   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2759   assert(branch_table[ltos] == 0, "can't compute twice");
2760   branch_table[ltos] = __ pc(); // non-volatile_entry point
2761   __ pop(ltos);
2762   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2763   __ stdx(R17_tos, Rclass_or_obj, Roffset);
2764   if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
2765   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2766     __ beq(CR_is_vol, Lvolatile); // Volatile?
2767   }
2768   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2769 
2770   __ align(32, 28, 28); // Align pop.
2771   // __ bind(Lbtos);
2772   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2773   assert(branch_table[btos] == 0, "can't compute twice");
2774   branch_table[btos] = __ pc(); // non-volatile_entry point
2775   __ pop(btos);
2776   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2777   __ stbx(R17_tos, Rclass_or_obj, Roffset);
2778   if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
2779   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2780     __ beq(CR_is_vol, Lvolatile); // Volatile?
2781   }
2782   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2783 
2784   __ align(32, 28, 28); // Align pop.
2785   // __ bind(Lctos);
2786   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2787   assert(branch_table[ctos] == 0, "can't compute twice");
2788   branch_table[ctos] = __ pc(); // non-volatile_entry point
2789   __ pop(ctos);
2790   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2791   __ sthx(R17_tos, Rclass_or_obj, Roffset);
2792   if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
2793   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2794     __ beq(CR_is_vol, Lvolatile); // Volatile?
2795   }
2796   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2797 
2798   __ align(32, 28, 28); // Align pop.
2799   // __ bind(Lstos);
2800   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2801   assert(branch_table[stos] == 0, "can't compute twice");
2802   branch_table[stos] = __ pc(); // non-volatile_entry point
2803   __ pop(stos);
2804   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2805   __ sthx(R17_tos, Rclass_or_obj, Roffset);
2806   if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
2807   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2808     __ beq(CR_is_vol, Lvolatile); // Volatile?
2809   }
2810   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2811 
2812   __ align(32, 28, 28); // Align pop.
2813   // __ bind(Latos);
2814   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2815   assert(branch_table[atos] == 0, "can't compute twice");
2816   branch_table[atos] = __ pc(); // non-volatile_entry point
2817   __ pop(atos);
2818   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
2819   do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2820   if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
2821   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2822     __ beq(CR_is_vol, Lvolatile); // Volatile?
2823     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2824 
2825     __ align(32, 12);
2826     __ bind(Lvolatile);
2827     __ fence();
2828   }
2829   // fallthru: __ b(Lexit);
2830 
2831 #ifdef ASSERT
2832   for (int i = 0; i<number_of_states; ++i) {
2833     assert(branch_table[i], "put initialization");
2834     //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2835     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2836   }
2837 #endif
2838 }
2839 
2840 void TemplateTable::putfield(int byte_no) {
2841   putfield_or_static(byte_no, false);
2842 }
2843 
2844 void TemplateTable::putstatic(int byte_no) {
2845   putfield_or_static(byte_no, true);
2846 }
2847 
2848 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
2849 void TemplateTable::jvmti_post_fast_field_mod() {
2850   __ should_not_reach_here();
2851 }
2852 
2853 void TemplateTable::fast_storefield(TosState state) {
2854   transition(state, vtos);
2855 
2856   const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2857                  Rclass_or_obj = R31,      // Needs to survive C call.
2858                  Roffset       = R22_tmp2, // Needs to survive C call.
2859                  Rflags        = R3_ARG1,
2860                  Rscratch      = R11_scratch1,
2861                  Rscratch2     = R12_scratch2,
2862                  Rscratch3     = R4_ARG2;
2863   const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2864 
2865   // Constant pool already resolved => Load flags and offset of field.
2866   __ get_cache_and_index_at_bcp(Rcache, 1);
2867   jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
2868   load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
2869 
2870   // Get the obj and the final store addr.
2871   pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2872 
2873   // Get volatile flag.
2874   __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2875   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
2876   {
2877     Label LnotVolatile;
2878     __ beq(CCR0, LnotVolatile);
2879     __ release();
2880     __ align(32, 12);
2881     __ bind(LnotVolatile);
2882   }
2883 
2884   // Do the store and fencing.
2885   switch(bytecode()) {
2886     case Bytecodes::_fast_aputfield:
2887       // Store into the field.
2888       do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2889       break;
2890 
2891     case Bytecodes::_fast_iputfield:
2892       __ stwx(R17_tos, Rclass_or_obj, Roffset);
2893       break;
2894 
2895     case Bytecodes::_fast_lputfield:
2896       __ stdx(R17_tos, Rclass_or_obj, Roffset);
2897       break;
2898 
2899     case Bytecodes::_fast_bputfield:
2900       __ stbx(R17_tos, Rclass_or_obj, Roffset);
2901       break;
2902 
2903     case Bytecodes::_fast_cputfield:
2904     case Bytecodes::_fast_sputfield:
2905       __ sthx(R17_tos, Rclass_or_obj, Roffset);
2906       break;
2907 
2908     case Bytecodes::_fast_fputfield:
2909       __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2910       break;
2911 
2912     case Bytecodes::_fast_dputfield:
2913       __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2914       break;
2915 
2916     default: ShouldNotReachHere();
2917   }
2918 
2919   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2920     Label LVolatile;
2921     __ beq(CR_is_vol, LVolatile);
2922     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2923 
2924     __ align(32, 12);
2925     __ bind(LVolatile);
2926     __ fence();
2927   }
2928 }
2929 
2930 void TemplateTable::fast_accessfield(TosState state) {
2931   transition(atos, state);
2932 
2933   Label LisVolatile;
2934   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2935 
2936   const Register Rcache        = R3_ARG1,
2937                  Rclass_or_obj = R17_tos,
2938                  Roffset       = R22_tmp2,
2939                  Rflags        = R23_tmp3,
2940                  Rscratch      = R12_scratch2;
2941 
2942   // Constant pool already resolved. Get the field offset.
2943   __ get_cache_and_index_at_bcp(Rcache, 1);
2944   load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
2945 
2946   // JVMTI support
2947   jvmti_post_field_access(Rcache, Rscratch, false, true);
2948 
2949   // Get the load address.
2950   __ null_check_throw(Rclass_or_obj, -1, Rscratch);
2951 
2952   // Get volatile flag.
2953   __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2954   __ bne(CCR0, LisVolatile);
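  // Each case below has a fast non-volatile path that dispatches directly, and
  // a volatile path (LisVolatile) that acquires after the load: integer and oop
  // loads use the load - twi_0 - isync idiom (twi_0 never traps, but makes the
  // isync depend on the loaded value), while float/double loads use
  // fcmpu - bne - isync, since twi cannot read a floating point register.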
2955 
2956   switch(bytecode()) {
2957     case Bytecodes::_fast_agetfield:
2958     {
2959       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2960       __ verify_oop(R17_tos);
2961       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
2962 
2963       __ bind(LisVolatile);
2964       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
2965       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2966       __ verify_oop(R17_tos);
2967       __ twi_0(R17_tos);
2968       __ isync();
2969       break;
2970     }
2971     case Bytecodes::_fast_igetfield:
2972     {
2973       __ lwax(R17_tos, Rclass_or_obj, Roffset);
2974       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
2975 
2976       __ bind(LisVolatile);
2977       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
2978       __ lwax(R17_tos, Rclass_or_obj, Roffset);
2979       __ twi_0(R17_tos);
2980       __ isync();
2981       break;
2982     }
2983     case Bytecodes::_fast_lgetfield:
2984     {
2985       __ ldx(R17_tos, Rclass_or_obj, Roffset);
2986       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
2987 
2988       __ bind(LisVolatile);
2989       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
2990       __ ldx(R17_tos, Rclass_or_obj, Roffset);
2991       __ twi_0(R17_tos);
2992       __ isync();
2993       break;
2994     }
2995     case Bytecodes::_fast_bgetfield:
2996     {
2997       __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2998       __ extsb(R17_tos, R17_tos);
2999       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3000 
3001       __ bind(LisVolatile);
3002       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3003       __ lbzx(R17_tos, Rclass_or_obj, Roffset);
3004       __ twi_0(R17_tos);
3005       __ extsb(R17_tos, R17_tos);
3006       __ isync();
3007       break;
3008     }
3009     case Bytecodes::_fast_cgetfield:
3010     {
3011       __ lhzx(R17_tos, Rclass_or_obj, Roffset);
3012       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3013 
3014       __ bind(LisVolatile);
3015       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3016       __ lhzx(R17_tos, Rclass_or_obj, Roffset);
3017       __ twi_0(R17_tos);
3018       __ isync();
3019       break;
3020     }
3021     case Bytecodes::_fast_sgetfield:
3022     {
3023       __ lhax(R17_tos, Rclass_or_obj, Roffset);
3024       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3025 
3026       __ bind(LisVolatile);
3027       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3028       __ lhax(R17_tos, Rclass_or_obj, Roffset);
3029       __ twi_0(R17_tos);
3030       __ isync();
3031       break;
3032     }
3033     case Bytecodes::_fast_fgetfield:
3034     {
3035       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3036       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3037 
3038       __ bind(LisVolatile);
3039       Label Ldummy;
3040       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3041       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3042       __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3043       __ bne_predict_not_taken(CCR0, Ldummy);
3044       __ bind(Ldummy);
3045       __ isync();
3046       break;
3047     }
3048     case Bytecodes::_fast_dgetfield:
3049     {
3050       __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
3051       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3052 
3053       __ bind(LisVolatile);
3054       Label Ldummy;
3055       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3056       __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
3057       __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3058       __ bne_predict_not_taken(CCR0, Ldummy);
3059       __ bind(Ldummy);
3060       __ isync();
3061       break;
3062     }
3063     default: ShouldNotReachHere();
3064   }
3065 }
3066 
3067 void TemplateTable::fast_xaccess(TosState state) {
3068   transition(vtos, state);
3069 
3070   Label LisVolatile;
3071   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3072   const Register Rcache        = R3_ARG1,
3073                  Rclass_or_obj = R17_tos,
3074                  Roffset       = R22_tmp2,
3075                  Rflags        = R23_tmp3,
3076                  Rscratch      = R12_scratch2;
3077 
3078   __ ld(Rclass_or_obj, 0, R18_locals);
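  // fast_xaccess implements the fused aload_0 + fast_?getfield bytecodes, so
  // the receiver is local slot 0.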
3079 
3080   // Constant pool already resolved. Get the field offset.
3081   __ get_cache_and_index_at_bcp(Rcache, 2);
3082   load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
3083 
3084   // JVMTI support is not needed, since we switch back to the single (non-fused) bytecodes as soon as a debugger attaches.
3085 
3086   // Needed to report exception at the correct bcp.
3087   __ addi(R14_bcp, R14_bcp, 1);
3088 
3089   // Get the load address.
3090   __ null_check_throw(Rclass_or_obj, -1, Rscratch);
3091 
3092   // Get volatile flag.
3093   __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
3094   __ bne(CCR0, LisVolatile);
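  // Same fast/volatile split as in fast_accessfield; the volatile paths use
  // the same acquire idioms described there.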
3095 
3096   switch(state) {
3097   case atos:
3098     {
3099       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
3100       __ verify_oop(R17_tos);
3101       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3102 
3103       __ bind(LisVolatile);
3104       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3105       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
3106       __ verify_oop(R17_tos);
3107       __ twi_0(R17_tos);
3108       __ isync();
3109       break;
3110     }
3111   case itos:
3112     {
3113       __ lwax(R17_tos, Rclass_or_obj, Roffset);
3114       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3115 
3116       __ bind(LisVolatile);
3117       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3118       __ lwax(R17_tos, Rclass_or_obj, Roffset);
3119       __ twi_0(R17_tos);
3120       __ isync();
3121       break;
3122     }
3123   case ftos:
3124     {
3125       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3126       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3127 
3128       __ bind(LisVolatile);
3129       Label Ldummy;
3130       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3131       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3132       __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3133       __ bne_predict_not_taken(CCR0, Ldummy);
3134       __ bind(Ldummy);
3135       __ isync();
3136       break;
3137     }
3138   default: ShouldNotReachHere();
3139   }
3140   __ addi(R14_bcp, R14_bcp, -1);
3141 }
3142 
3143 // ============================================================================
3144 // Calls
3145 
3146 // Common code for invoke
3147 //
3148 // Input:
3149 //   - byte_no
3150 //
3151 // Output:
3152 //   - Rmethod:        The method to invoke next.
3153 //   - Rret_addr:      The return address to return to.
3154 //   - Rindex:         MethodType (invokehandle) or CallSite obj (invokedynamic)
3155 //   - Rrecv:          Cache for "this" pointer, might be noreg if static call.
3156 //   - Rflags:         Method flags from const pool cache.
3157 //
3158 //  Kills:
3159 //   - Rscratch1
3160 //
3161 void TemplateTable::prepare_invoke(int byte_no,
3162                                    Register Rmethod,  // linked method (or i-klass)
3163                                    Register Rret_addr,// return address
3164                                    Register Rindex,   // itable index, MethodType, etc.
3165                                    Register Rrecv,    // If caller wants to see it.
3166                                    Register Rflags,   // If caller wants to test it.
3167                                    Register Rscratch
3168                                    ) {
3169   // Determine flags.
3170   const Bytecodes::Code code = bytecode();
3171   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3172   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3173   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3174   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3175   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3176   const bool load_receiver       = (Rrecv != noreg);
3177   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3178 
3179   assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
3180   assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
3181   assert_different_registers(Rret_addr, Rscratch);
3182 
3183   load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);
3184 
3185   // Saving of SP done in call_from_interpreter.
3186 
3187   // Maybe push "appendix" to arguments.
3188   if (is_invokedynamic || is_invokehandle) {
3189     Label Ldone;
3190     __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
3191     __ beq(CCR0, Ldone);
3192     // Push "appendix" (MethodType, CallSite, etc.).
3193     // This must be done before we get the receiver,
3194     // since the parameter_size includes it.
3195     __ load_resolved_reference_at_index(Rscratch, Rindex);
3196     __ verify_oop(Rscratch);
3197     __ push_ptr(Rscratch);
3198     __ bind(Ldone);
3199   }
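  // The appendix is an implicit trailing argument synthesized by the JVM for
  // invokedynamic/invokehandle call sites; the linked method's signature
  // already accounts for it, which is why parameter_size includes it.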
3200 
3201   // Load receiver if needed (after appendix is pushed so parameter size is correct).
3202   if (load_receiver) {
3203     const Register Rparam_count = Rscratch;
3204     __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
3205     __ load_receiver(Rparam_count, Rrecv);
3206     __ verify_oop(Rrecv);
3207   }
3208 
3209   // Get return address.
3210   {
3211     Register Rtable_addr = Rscratch;
3212     Register Rret_type = Rret_addr;
3213     address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3214 
3215     // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3216     __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3217     __ load_dispatch_table(Rtable_addr, (address*)table_addr);
3218     __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3219     // Get return address.
3220     __ ldx(Rret_addr, Rtable_addr, Rret_type);
3221   }
3222 }
3223 
3224 // Helper for virtual calls. Load target out of vtable and jump off!
3225 // Kills all passed registers.
3226 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {
3227 
3228   assert_different_registers(Rrecv_klass, Rtemp, Rret);
3229   const Register Rtarget_method = Rindex;
3230 
3231   // Get target method & entry point.
3232   const int base = InstanceKlass::vtable_start_offset() * wordSize;
3233   // Calc vtable addr: scale the vtable index by the vtable entry size.
3234   __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
3235   // Load target.
3236   __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
3237   __ ldx(Rtarget_method, Rindex, Rrecv_klass);
3238   __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
3239 }
3240 
3241 // Virtual or final call. Final calls are rewritten on the fly to run through "fast_finalcall" next time.
3242 void TemplateTable::invokevirtual(int byte_no) {
3243   transition(vtos, vtos);
3244 
3245   Register Rtable_addr = R11_scratch1,
3246            Rret_type = R12_scratch2,
3247            Rret_addr = R5_ARG3,
3248            Rflags = R22_tmp2, // Should survive C call.
3249            Rrecv = R3_ARG1,
3250            Rrecv_klass = Rrecv,
3251            Rvtableindex_or_method = R31, // Should survive C call.
3252            Rnum_params = R4_ARG2,
3253            Rnew_bc = R6_ARG4;
3254 
3255   Label LnotFinal;
3256 
3257   load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
3258 
3259   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3260   __ bfalse(CCR0, LnotFinal);
3261 
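  // Final method case: f2 holds the Method* directly, so rewrite the bytecode
  // to _fast_invokevfinal and call without a vtable lookup.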
3262   patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
3263   invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
3264 
3265   __ align(32, 12);
3266   __ bind(LnotFinal);
3267   // Load "this" pointer (receiver).
3268   __ rldicl(Rnum_params, Rflags, 64, 48); // Extract parameter count from the flags word.
3269   __ load_receiver(Rnum_params, Rrecv);
3270   __ verify_oop(Rrecv);
3271 
3272   // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3273   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3274   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3275   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3276   __ ldx(Rret_addr, Rret_type, Rtable_addr);
3277   __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
3278   __ load_klass(Rrecv_klass, Rrecv);
3279   __ verify_klass_ptr(Rrecv_klass);
3280   __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
3281 
3282   generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
3283 }
3284 
3285 void TemplateTable::fast_invokevfinal(int byte_no) {
3286   transition(vtos, vtos);
3287 
3288   assert(byte_no == f2_byte, "use this argument");
3289   Register Rflags  = R22_tmp2,
3290            Rmethod = R31;
3291   load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
3292   invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
3293 }
3294 
3295 void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {
3296 
3297   assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);
3298 
3299   // Load receiver from stack slot.
3300   Register Rrecv = Rscratch2;
3301   Register Rnum_params = Rrecv;
3302 
3303   __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
3304   __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
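  // Rnum_params now holds the callee's parameter count (read via ConstMethod);
  // it is used below to locate the receiver on the expression stack.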
3305 
3306   // Get return address.
3307   Register Rtable_addr = Rscratch1,
3308            Rret_addr   = Rflags,
3309            Rret_type   = Rret_addr;
3310   // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3311   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3312   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3313   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3314   __ ldx(Rret_addr, Rret_type, Rtable_addr);
3315 
3316   // Load receiver and receiver NULL check.
3317   __ load_receiver(Rnum_params, Rrecv);
3318   __ null_check_throw(Rrecv, -1, Rscratch1);
3319 
3320   __ profile_final_call(Rrecv, Rscratch1);
3321 
3322   // Do the call.
3323   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
3324 }
3325 
3326 void TemplateTable::invokespecial(int byte_no) {
3327   assert(byte_no == f1_byte, "use this argument");
3328   transition(vtos, vtos);
3329 
3330   Register Rtable_addr = R3_ARG1,
3331            Rret_addr   = R4_ARG2,
3332            Rflags      = R5_ARG3,
3333            Rreceiver   = R6_ARG4,
3334            Rmethod     = R31;
3335 
3336   prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);
3337 
3338   // Receiver NULL check.
3339   __ null_check_throw(Rreceiver, -1, R11_scratch1);
3340 
3341   __ profile_call(R11_scratch1, R12_scratch2);
3342   __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
3343 }
3344 
3345 void TemplateTable::invokestatic(int byte_no) {
3346   assert(byte_no == f1_byte, "use this argument");
3347   transition(vtos, vtos);
3348 
3349   Register Rtable_addr = R3_ARG1,
3350            Rret_addr   = R4_ARG2,
3351            Rflags      = R5_ARG3;
3352 
3353   prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
3354 
3355   __ profile_call(R11_scratch1, R12_scratch2);
3356   __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
3357 }
3358 
3359 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
3360                                                   Register Rret,
3361                                                   Register Rflags,
3362                                                   Register Rindex,
3363                                                   Register Rtemp1,
3364                                                   Register Rtemp2) {
3365 
3366   assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
3367   Label LnotFinal;
3368 
3369   // Check for vfinal.
3370   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3371   __ bfalse(CCR0, LnotFinal);
3372 
3373   Register Rscratch = Rflags; // Rflags is dead now.
3374 
3375   // Final call case.
3376   __ profile_final_call(Rtemp1, Rscratch);
3377   // Do the final call - the index (f2) contains the method.
3378   __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
3379 
3380   // Non-final call case.
3381   __ bind(LnotFinal);
3382   __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
3383   generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
3384 }
3385 
3386 void TemplateTable::invokeinterface(int byte_no) {
3387   assert(byte_no == f1_byte, "use this argument");
3388   transition(vtos, vtos);
3389 
3390   const Register Rscratch1        = R11_scratch1,
3391                  Rscratch2        = R12_scratch2,
3392                  Rscratch3        = R9_ARG7,
3393                  Rscratch4        = R10_ARG8,
3394                  Rtable_addr      = Rscratch2,
3395                  Rinterface_klass = R5_ARG3,
3396                  Rret_type        = R8_ARG6,
3397                  Rret_addr        = Rret_type,
3398                  Rindex           = R6_ARG4,
3399                  Rreceiver        = R4_ARG2,
3400                  Rrecv_klass      = Rreceiver,
3401                  Rflags           = R7_ARG5;
3402 
3403   prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);
3404 
3405   // Get receiver klass.
3406   __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
3407   __ load_klass(Rrecv_klass, Rreceiver);
3408 
3409   // Check corner case object method.
3410   Label LobjectMethod;
3411 
3412   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
3413   __ btrue(CCR0, LobjectMethod);
3414 
3415   // Fallthrough: The normal invokeinterface case.
3416   __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
3417 
3418   // Find entry point to call.
3419   Label Lthrow_icc, Lthrow_ame;
3420   // Result will be returned in Rindex.
3421   __ mr(Rscratch4, Rrecv_klass);
3422   __ mr(Rscratch3, Rindex);
3423   __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);
3424 
3425   __ cmpdi(CCR0, Rindex, 0);
3426   __ beq(CCR0, Lthrow_ame);
3427   // Found entry. Jump off!
3428   __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
3429 
3430   // Vtable entry was NULL => Throw abstract method error.
3431   __ bind(Lthrow_ame);
3432   __ mr(Rrecv_klass, Rscratch4);
3433   __ mr(Rindex, Rscratch3);
3434   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3435 
3436   // Interface was not found => Throw incompatible class change error.
3437   __ bind(Lthrow_icc);
3438   __ mr(Rrecv_klass, Rscratch4);
3439   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3440 
3441   __ should_not_reach_here();
3442 
3443   // Special case of invokeinterface called for a virtual method of
3444   // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
3445   // The invokeinterface was rewritten to an invokevirtual, hence we have
3446   // to handle this corner case. This code isn't produced by javac, but could
3447   // be produced by another compliant Java compiler.
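  // Illustrative example (not produced by javac): an
  //   invokeinterface java/lang/Comparable.hashCode:()I
  // instruction is legal; hashCode is a java.lang.Object method, so the call
  // site is linked as a (forced) virtual call.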
3448   __ bind(LobjectMethod);
3449   invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
3450 }
3451 
3452 void TemplateTable::invokedynamic(int byte_no) {
3453   transition(vtos, vtos);
3454 
3455   const Register Rret_addr = R3_ARG1,
3456                  Rflags    = R4_ARG2,
3457                  Rmethod   = R22_tmp2,
3458                  Rscratch1 = R11_scratch1,
3459                  Rscratch2 = R12_scratch2;
3460 
3461   prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);
3462 
3463   // Profile this call.
3464   __ profile_call(Rscratch1, Rscratch2);
3465 
3466   // Off we go. With the new method handles, we don't jump to a method handle
3467   // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
3468   // to be the CallSite object the bootstrap method returned. This is passed to a
3469   // "link" method which does the dispatch (most likely it just grabs the MH stored
3470   // inside the CallSite and does an invokehandle).
3471   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3472 }
3473 
3474 void TemplateTable::invokehandle(int byte_no) {
3475   transition(vtos, vtos);
3476 
3477   const Register Rret_addr = R3_ARG1,
3478                  Rflags    = R4_ARG2,
3479                  Rrecv     = R5_ARG3,
3480                  Rmethod   = R22_tmp2,
3481                  Rscratch1 = R11_scratch1,
3482                  Rscratch2 = R12_scratch2;
3483 
3484   prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
3485   __ verify_method_ptr(Rmethod);
3486   __ null_check_throw(Rrecv, -1, Rscratch2);
3487 
3488   __ profile_final_call(Rrecv, Rscratch1);
3489 
3490   // Still no call from handle => We call the method handle interpreter here.
3491   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3492 }
3493 
3494 // =============================================================================
3495 // Allocation
3496 
3497 // Puts allocated obj ref onto the expression stack.
3498 void TemplateTable::_new() {
3499   transition(vtos, atos);
3500 
3501   Label Lslow_case,
3502         Ldone,
3503         Linitialize_header,
3504         Lallocate_shared,
3505         Linitialize_object;  // Including clearing the fields.
3506 
3507   const Register RallocatedObject = R17_tos,
3508                  RinstanceKlass   = R9_ARG7,
3509                  Rscratch         = R11_scratch1,
3510                  Roffset          = R8_ARG6,
3511                  Rinstance_size   = Roffset,
3512                  Rcpool           = R4_ARG2,
3513                  Rtags            = R3_ARG1,
3514                  Rindex           = R5_ARG3;
3515 
3516   const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc();
3517 
3518   // --------------------------------------------------------------------------
3519   // Check if fast case is possible.
3520 
3521   // Load pointers to const pool and const pool's tags array.
3522   __ get_cpool_and_tags(Rcpool, Rtags);
3523   // Load index of constant pool entry.
3524   __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
3525 
3526   if (UseTLAB) {
3527     // Make sure the class we're about to instantiate has been resolved.
3528     // This is done before loading the InstanceKlass to be consistent with the
3529     // order in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
3530     __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3531     __ lbzx(Rtags, Rindex, Rtags);
3532 
3533     __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3534     __ bne(CCR0, Lslow_case);
3535 
3536     // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
3537     __ sldi(Roffset, Rindex, LogBytesPerWord);
3538     __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
3539     __ isync(); // Order load of instance Klass wrt. tags.
3540     __ ldx(RinstanceKlass, Roffset, Rscratch);
3541 
3542     // Make sure klass is fully initialized and get instance_size.
3543     __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
3544     __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);
3545 
3546     __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
3547     // Make sure klass has no finalizer and is not abstract, an interface, or java/lang/Class.
3548     __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
3549 
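    // crnand combines both tests without an intermediate branch:
    // CR0.eq := !(CR1.eq /* fully initialized */ & CR0.eq /* slow path bit clear */),
    // so the following beq takes the slow path if either test failed.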
3550     __ crnand(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // slow path bit set or not fully initialized?
3551     __ beq(CCR0, Lslow_case);
3552 
3553     // --------------------------------------------------------------------------
3554     // Fast case:
3555     // Allocate the instance.
3556     // 1) Try to allocate in the TLAB.
3557     // 2) If fail, and the TLAB is not full enough to discard, allocate in the shared Eden.
3558     // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.).
3559 
3560     Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
3561     Register RnewTopValue = R6_ARG4;
3562     Register RendValue    = R7_ARG5;
3563 
3564     // Check if we can allocate in the TLAB.
3565     __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3566     __ ld(RendValue,    in_bytes(JavaThread::tlab_end_offset()), R16_thread);
3567 
3568     __ add(RnewTopValue, Rinstance_size, RoldTopValue);
3569 
3570     // If there is enough space, we do not CAS and do not clear.
3571     __ cmpld(CCR0, RnewTopValue, RendValue);
3572     __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);
3573 
3574     __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3575 
3576     if (ZeroTLAB) {
3577       // The fields have already been cleared.
3578       __ b(Linitialize_header);
3579     } else {
3580       // Initialize both the header and fields.
3581       __ b(Linitialize_object);
3582     }
3583 
3584     // Fall through: TLAB was too small.
3585     if (allow_shared_alloc) {
3586       Register RtlabWasteLimitValue = R10_ARG8;
3587       Register RfreeValue = RnewTopValue;
3588 
3589       __ bind(Lallocate_shared);
3590       // Check if tlab should be discarded (refill_waste_limit >= free).
3591       __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
3592       __ subf(RfreeValue, RoldTopValue, RendValue);
3593       __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
3594       __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
3595       __ bge(CCR0, Lslow_case);
3596 
3597       // Increment waste limit to prevent getting stuck on this slow path.
3598       __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
3599       __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
3600     }
3601     // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
3602   }
3603   // else: Always go the slow path.
3604 
3605   // --------------------------------------------------------------------------
3606   // slow case
3607   __ bind(Lslow_case);
3608   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
3609 
3610   if (UseTLAB) {
3611     __ b(Ldone);
3612     // --------------------------------------------------------------------------
3613     // Init1: Zero out newly allocated memory.
3614 
3615     if (!ZeroTLAB || allow_shared_alloc) {
3616       // Clear object fields.
3617       __ bind(Linitialize_object);
3618 
3619       // Initialize remaining object fields.
3620       Register Rbase = Rtags;
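      // Compute the number of doublewords to clear:
      // (instance size in bytes - header size + 7) >> 3, i.e. the object body
      // rounded up to a whole doubleword.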
3621       __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
3622       __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
3623       __ srdi(Rinstance_size, Rinstance_size, 3);
3624 
3625       // Clear out the object, skipping the header. Also takes care of the zero-length case.
3626       __ clear_memory_doubleword(Rbase, Rinstance_size);
3627       // fallthru: __ b(Linitialize_header);
3628     }
3629 
3630     // --------------------------------------------------------------------------
3631     // Init2: Initialize the header: mark, klass
3632     __ bind(Linitialize_header);
3633 
3634     // Init mark.
3635     if (UseBiasedLocking) {
3636       __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
3637     } else {
3638       __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
3639     }
3640     __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
3641 
3642     // Init klass.
3643     __ store_klass_gap(RallocatedObject);
3644     __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
3645 
3646     // Check and trigger dtrace event.
3647     {
3648       SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
3649       __ push(atos);
3650       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
3651       __ pop(atos);
3652     }
3653   }
3654 
3655   // continue
3656   __ bind(Ldone);
3657 
3658   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3659   __ membar(Assembler::StoreStore);
3660 }
3661 
3662 void TemplateTable::newarray() {
3663   transition(itos, atos);
3664 
3665   __ lbz(R4, 1, R14_bcp);
3666   __ extsw(R5, R17_tos);
3667   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);
3668 
3669   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3670   __ membar(Assembler::StoreStore);
3671 }
3672 
3673 void TemplateTable::anewarray() {
3674   transition(itos, atos);
3675 
3676   __ get_constant_pool(R4);
3677   __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
3678   __ extsw(R6, R17_tos); // size
3679   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);
3680 
3681   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3682   __ membar(Assembler::StoreStore);
3683 }
3684 
3685 // Allocate a multi dimensional array
3686 void TemplateTable::multianewarray() {
3687   transition(vtos, atos);
3688 
3689   Register Rptr = R31; // Needs to survive C call.
3690 
3691   // Put ndims * wordSize into frame temp slot
3692   __ lbz(Rptr, 3, R14_bcp);
3693   __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
3694   // Esp points past last_dim, so set R4 to the first_dim address.
3695   __ add(R4, Rptr, R15_esp);
3696   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
3697   // Pop all dimensions off the stack.
3698   __ add(R15_esp, Rptr, R15_esp);
3699 
3700   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3701   __ membar(Assembler::StoreStore);
3702 }
3703 
3704 void TemplateTable::arraylength() {
3705   transition(atos, itos);
3706 
3708   __ verify_oop(R17_tos);
3709   __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
3710   __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
3711 }
3712 
3713 // ============================================================================
3714 // Typechecks
3715 
3716 void TemplateTable::checkcast() {
3717   transition(atos, atos);
3718 
3719   Label Ldone, Lis_null, Lquicked, Lresolved;
3720   Register Roffset         = R6_ARG4,
3721            RobjKlass       = R4_ARG2,
3722            RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
3723            Rcpool          = R11_scratch1,
3724            Rtags           = R12_scratch2;
3725 
3726   // Null does not pass.
3727   __ cmpdi(CCR0, R17_tos, 0);
3728   __ beq(CCR0, Lis_null);
3729 
3730   // Get constant pool tag to find out if the bytecode has already been "quickened".
3731   __ get_cpool_and_tags(Rcpool, Rtags);
3732 
3733   __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3734 
3735   __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3736   __ lbzx(Rtags, Rtags, Roffset);
3737 
3738   __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3739   __ beq(CCR0, Lquicked);
3740 
3741   // Call into the VM to "quicken" instanceof.
3742   __ push_ptr();  // for GC
3743   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3744   __ get_vm_result_2(RspecifiedKlass);
3745   __ pop_ptr();   // Restore receiver.
3746   __ b(Lresolved);
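  // Quickening resolves the constant pool entry to a Klass*, so subsequent
  // executions see tag JVM_CONSTANT_Class and branch to Lquicked directly.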
3747 
3748   // Extract target class from constant pool.
3749   __ bind(Lquicked);
3750   __ sldi(Roffset, Roffset, LogBytesPerWord);
3751   __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
3752   __ isync(); // Order load of specified Klass wrt. tags.
3753   __ ldx(RspecifiedKlass, Rcpool, Roffset);
3754 
3755   // Do the checkcast.
3756   __ bind(Lresolved);
3757   // Get value klass in RobjKlass.
3758   __ load_klass(RobjKlass, R17_tos);
3759   // Generate a fast subtype check; branch to Ldone if the check succeeds, fall through on failure.
3760   __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3761 
3762   // Not a subtype, so we must throw a ClassCastException.
3763   // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
3764   __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
3765   __ mtctr(R11_scratch1);
3766   __ bctr();
3767 
3768   // Profile the null case.
3769   __ align(32, 12);
3770   __ bind(Lis_null);
3771   __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.
3772 
3773   __ align(32, 12);
3774   __ bind(Ldone);
3775 }
3776 
3777 // Output:
3778 //   - tos == 0: Obj was null or not an instance of class.
3779 //   - tos == 1: Obj was an instance of class.
3780 void TemplateTable::instanceof() {
3781   transition(atos, itos);
3782 
3783   Label Ldone, Lis_null, Lquicked, Lresolved;
3784   Register Roffset         = R5_ARG3,
3785            RobjKlass       = R4_ARG2,
3786            RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register.
3787            Rcpool          = R11_scratch1,
3788            Rtags           = R12_scratch2;
3789 
3790   // Null does not pass.
3791   __ cmpdi(CCR0, R17_tos, 0);
3792   __ beq(CCR0, Lis_null);
3793 
3794   // Get constant pool tag to find out if the bytecode has already been "quickened".
3795   __ get_cpool_and_tags(Rcpool, Rtags);
3796 
3797   __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3798 
3799   __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3800   __ lbzx(Rtags, Rtags, Roffset);
3801 
3802   __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3803   __ beq(CCR0, Lquicked);
3804 
3805   // Call into the VM to "quicken" instanceof.
3806   __ push_ptr();  // for GC
3807   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3808   __ get_vm_result_2(RspecifiedKlass);
3809   __ pop_ptr();   // Restore receiver.
3810   __ b(Lresolved);
3811 
3812   // Extract target class from constant pool.
3813   __ bind(Lquicked);
3814   __ sldi(Roffset, Roffset, LogBytesPerWord);
3815   __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
3816   __ isync(); // Order load of specified Klass wrt. tags.
3817   __ ldx(RspecifiedKlass, Rcpool, Roffset);
3818 
3819   // Do the checkcast.
3820   __ bind(Lresolved);
3821   // Get value klass in RobjKlass.
3822   __ load_klass(RobjKlass, R17_tos);
3823   // Generate a fast subtype check; branch to Ldone with tos == 1 if obj is a subtype, else fall through and set tos = 0.
3824   __ li(R17_tos, 1);
3825   __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3826   __ li(R17_tos, 0);
3827 
3828   if (ProfileInterpreter) {
3829     __ b(Ldone);
3830   }
3831 
3832   // Profile the null case.
3833   __ align(32, 12);
3834   __ bind(Lis_null);
3835   __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
3836 
3837   __ align(32, 12);
3838   __ bind(Ldone);
3839 }
3840 
3841 // =============================================================================
3842 // Breakpoints
3843 
3844 void TemplateTable::_breakpoint() {
3845   transition(vtos, vtos);
3846 
3847   // Get the unpatched byte code.
3848   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
3849   __ mr(R31, R3_RET);
3850 
3851   // Post the breakpoint event.
3852   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);
3853 
3854   // Complete the execution of original bytecode.
3855   __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
3856 }
3857 
3858 // =============================================================================
3859 // Exceptions
3860 
3861 void TemplateTable::athrow() {
3862   transition(atos, vtos);
3863 
3864   // Exception oop is in tos
3865   __ verify_oop(R17_tos);
3866 
3867   __ null_check_throw(R17_tos, -1, R11_scratch1);
3868 
3869   // Throw exception interpreter entry expects exception oop to be in R3.
3870   __ mr(R3_RET, R17_tos);
3871   __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
3872   __ mtctr(R11_scratch1);
3873   __ bctr();
3874 }
3875 
3876 // =============================================================================
3877 // Synchronization
3878 // Searches the basic object lock list on the stack for a free slot
3879 // and uses it to lock the object in tos.
3880 //
3881 // Recursive locking is enabled by exiting the search if the same
3882 // object is already found in the list. Thus, a new basic object lock
3883 // is allocated "higher up" in the stack and is therefore found first
3884 // at the next monitor exit.
3885 void TemplateTable::monitorenter() {
3886   transition(atos, vtos);
3887 
3888   __ verify_oop(R17_tos);
3889 
3890   Register Rcurrent_monitor  = R11_scratch1,
3891            Rcurrent_obj      = R12_scratch2,
3892            Robj_to_lock      = R17_tos,
3893            Rscratch1         = R3_ARG1,
3894            Rscratch2         = R4_ARG2,
3895            Rscratch3         = R5_ARG3,
3896            Rcurrent_obj_addr = R6_ARG4;
3897 
3898   // ------------------------------------------------------------------------------
3899   // Null pointer exception.
3900   __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
3901 
3902   // Try to acquire a lock on the object.
3903   // Repeat until succeeded (i.e., until monitorenter returns true).
3904 
3905   // ------------------------------------------------------------------------------
3906   // Find a free slot in the monitor block.
3907   Label Lfound, Lexit, Lallocate_new;
3908   ConditionRegister found_free_slot = CCR0,
3909                     found_same_obj  = CCR1,
3910                     reached_limit   = CCR6;
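  // The monitor area lies between the frame's ijava_state and the expression
  // stack and grows towards lower addresses; R26_monitor points to the most
  // recently allocated BasicObjectLock. The search walks from there towards
  // the monitor base (higher addresses, i.e. older slots).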
3911   {
3912     Label Lloop, Lentry;
3913     Register Rlimit = Rcurrent_monitor;
3914 
3915     // Set up search loop - start with topmost monitor.
3916     __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
3917 
3918     __ ld(Rlimit, 0, R1_SP);
3919     __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
3920 
3921     // Check if any slot is present => short cut to allocation if not.
3922     __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
3923     __ bgt(reached_limit, Lallocate_new);
3924 
3925     // Pre-load topmost slot.
3926     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
3927     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
3928     // The search loop.
3929     __ bind(Lloop);
3930     // Found free slot?
3931     __ cmpdi(found_free_slot, Rcurrent_obj, 0);
3932     // Is this entry for same obj? If so, stop the search and take the found
3933     // free slot or allocate a new one to enable recursive locking.
3934     __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
3935     __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
3936     __ beq(found_free_slot, Lexit);
3937     __ beq(found_same_obj, Lallocate_new);
3938     __ bgt(reached_limit, Lallocate_new);
3939     // Check if the last allocated BasicObjectLock has been reached.
3940     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
3941     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
3942     // Next iteration if unchecked BasicObjectLocks exist on the stack.
3943     __ b(Lloop);
3944   }
3945 
3946   // ------------------------------------------------------------------------------
3947   // Check if we found a free slot.
3948   __ bind(Lexit);
3949 
3950   __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
3951   __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
3952   __ b(Lfound);
3953 
3954   // We didn't find a free BasicObjLock => allocate one.
3955   __ align(32, 12);
3956   __ bind(Lallocate_new);
3957   __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
3958   __ mr(Rcurrent_monitor, R26_monitor);
3959   __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
3960 
3961   // ------------------------------------------------------------------------------
3962   // We now have a slot to lock.
3963   __ bind(Lfound);
3964 
3965   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions works correctly.
3966   // The object has already been popped from the stack, so the expression stack looks correct.
3967   __ addi(R14_bcp, R14_bcp, 1);
3968 
3969   __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
3970   __ lock_object(Rcurrent_monitor, Robj_to_lock);
3971 
3972   // Check if there's enough space on the stack for the monitors after locking.
3973   Label Lskip_stack_check;
3974   // Optimization: If the monitors stack section is less than a standard page size (4K), don't run
3975   // the stack check. There should be enough shadow pages to cover that.
3976   __ ld(Rscratch3, 0, R1_SP);
3977   __ sub(Rscratch3, Rscratch3, R26_monitor);
3978   __ cmpdi(CCR0, Rscratch3, 4*K);
3979   __ blt(CCR0, Lskip_stack_check);
3980 
3981   DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
3982   __ li(Rscratch1, 0);
3983   __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);
3984 
3985   __ align(32, 12);
3986   __ bind(Lskip_stack_check);
3987 
3988   // The bcp has already been incremented. Just need to dispatch to next instruction.
3989   __ dispatch_next(vtos);
3990 }
3991 
3992 void TemplateTable::monitorexit() {
3993   transition(atos, vtos);
3994   __ verify_oop(R17_tos);
3995 
3996   Register Rcurrent_monitor  = R11_scratch1,
3997            Rcurrent_obj      = R12_scratch2,
3998            Robj_to_lock      = R17_tos,
3999            Rcurrent_obj_addr = R3_ARG1,
4000            Rlimit            = R4_ARG2;
4001   Label Lfound, Lillegal_monitor_state;
4002 
4003   // Check corner case: unbalanced monitorEnter / Exit.
4004   __ ld(Rlimit, 0, R1_SP);
4005   __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
4006 
4007   // Null pointer check.
4008   __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4009 
4010   __ cmpld(CCR0, R26_monitor, Rlimit);
4011   __ bgt(CCR0, Lillegal_monitor_state);
4012 
4013   // Find the corresponding slot in the monitors stack section.
4014   {
4015     Label Lloop;
4016 
4017     // Start with topmost monitor.
4018     __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4019     __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
4020     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4021     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4022 
4023     __ bind(Lloop);
4024     // Is this entry for same obj?
4025     __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
4026     __ beq(CCR0, Lfound);
4027 
4028     // Check if the last allocated BasicObjectLock has been reached.
4029 
4030     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4031     __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
4032     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4033 
4034     // Next iteration if unchecked BasicObjectLocks exist on the stack.
4035     __ ble(CCR0, Lloop);
4036   }
4037 
4038   // Fell through without finding the basic obj lock => throw up!
4039   __ bind(Lillegal_monitor_state);
4040   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4041   __ should_not_reach_here();
4042 
4043   __ align(32, 12);
4044   __ bind(Lfound);
4045   __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
4046           -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4047   __ unlock_object(Rcurrent_monitor);
4048 }
4049 
4050 // ============================================================================
4051 // Wide bytecodes
4052 
4053 // Wide instructions. Simply redirects to the wide entry point for that instruction.
4054 void TemplateTable::wide() {
4055   transition(vtos, vtos);
4056 
4057   const Register Rtable = R11_scratch1,
4058                  Rindex = R12_scratch2,
4059                  Rtmp   = R0;
4060 
4061   __ lbz(Rindex, 1, R14_bcp);
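  // Rindex = opcode of the bytecode following the wide prefix; it indexes the
  // table of wide entry points.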
4062 
4063   __ load_dispatch_table(Rtable, Interpreter::_wentry_point);
4064 
4065   __ slwi(Rindex, Rindex, LogBytesPerWord);
4066   __ ldx(Rtmp, Rtable, Rindex);
4067   __ mtctr(Rtmp);
4068   __ bctr();
4069   // Note: the bcp increment step is part of the individual wide bytecode implementations.
4070 }
4071 #endif // !CC_INTERP