/*
 * Copyright (c) 2014, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2013, 2014 SAP AG. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "asm/macroAssembler.inline.hpp"
#include "interpreter/interpreter.hpp"
#include "interpreter/interpreterRuntime.hpp"
#include "interpreter/templateInterpreter.hpp"
#include "interpreter/templateTable.hpp"
#include "memory/universe.inline.hpp"
#include "oops/objArrayKlass.hpp"
#include "oops/oop.inline.hpp"
#include "prims/methodHandles.hpp"
#include "runtime/sharedRuntime.hpp"
#include "runtime/stubRoutines.hpp"
#include "runtime/synchronizer.hpp"
#include "utilities/macros.hpp"

#ifndef CC_INTERP

#undef __
#define __ _masm->

// ============================================================================
// Misc helpers
// Do an oop store like *(base + index) = val OR *(base + offset) = val
// (only one of the two variants is possible at a time).
// Index can be noreg.
// Kills:
//   Rbase, Rtmp1, Rtmp2, Rtmp3
static void do_oop_store(InterpreterMacroAssembler* _masm,
                         Register           Rbase,
                         RegisterOrConstant offset,
                         Register           Rval,         // Noreg means always null.
                         Register           Rtmp1,
                         Register           Rtmp2,
                         Register           Rtmp3,
                         BarrierSet::Name   barrier,
                         bool               precise,
                         bool               check_null) {
  assert_different_registers(Rtmp1, Rtmp2, Rtmp3, Rval, Rbase);

  switch (barrier) {
#if INCLUDE_ALL_GCS
    case BarrierSet::G1SATBCT:
    case BarrierSet::G1SATBCTLogging:
      {
        // Load and record the previous value.
        __ g1_write_barrier_pre(Rbase, offset,
                                Rtmp3, /* holder of pre_val ? */
                                Rtmp1, Rtmp2, false /* frame */);

        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval must stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ g1_write_barrier_post(Rbase, Rval, Rtmp1, Rtmp2, Rtmp3, /*filtered (fast path)*/ &Ldone);
          if (check_null) { __ b(Ldone); }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
#endif // INCLUDE_ALL_GCS
    case BarrierSet::CardTableModRef:
    case BarrierSet::CardTableExtension:
      {
        Label Lnull, Ldone;
        if (Rval != noreg) {
          if (check_null) {
            __ cmpdi(CCR0, Rval, 0);
            __ beq(CCR0, Lnull);
          }
          __ store_heap_oop_not_null(Rval, offset, Rbase, /*Rval should stay uncompressed.*/ Rtmp1);
          // Mark the card.
          if (!(offset.is_constant() && offset.as_constant() == 0) && precise) {
            __ add(Rbase, offset, Rbase);
          }
          __ card_write_barrier_post(Rbase, Rval, Rtmp1);
          if (check_null) {
            __ b(Ldone);
          }
        }

        if (Rval == noreg || check_null) { // Store null oop.
          Register Rnull = Rval;
          __ bind(Lnull);
          if (Rval == noreg) {
            Rnull = Rtmp1;
            __ li(Rnull, 0);
          }
          if (UseCompressedOops) {
            __ stw(Rnull, offset, Rbase);
          } else {
            __ std(Rnull, offset, Rbase);
          }
        }
        __ bind(Ldone);
      }
      break;
    case BarrierSet::ModRef:
    case BarrierSet::Other:
      ShouldNotReachHere();
      break;
    default:
      ShouldNotReachHere();
  }
}
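
// For reference: the sequences emitted above correspond roughly to the
// following C-like sketch (illustrative only; the barrier helpers stand in
// for the g1_write_barrier_pre/post and card_write_barrier_post code):
//
//   void do_oop_store(oop* base, intptr_t offset, oop val) {
//     pre_barrier(base + offset);                  // G1 only: record old value (SATB).
//     if (val != NULL) {
//       *(base + offset) = maybe_compress(val);    // stw (compressed) or std.
//       post_barrier(precise ? base + offset : base, val); // Dirty card / G1 queue.
//     } else {
//       *(base + offset) = 0;                      // Null store needs no post barrier.
//     }
//   }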

// ============================================================================
// Platform-dependent initialization

void TemplateTable::pd_initialize() {
  // No ppc64 specific initialization.
}

Address TemplateTable::at_bcp(int offset) {
  // Not used on ppc.
  ShouldNotReachHere();
  return Address();
}

// Patches the current bytecode (ptr to it located in bcp)
// in the bytecode stream with a new one.
void TemplateTable::patch_bytecode(Bytecodes::Code new_bc, Register Rnew_bc, Register Rtemp, bool load_bc_into_bc_reg /*=true*/, int byte_no) {
  // With sharing on, may need to test method flag.
  if (!RewriteBytecodes) return;
  Label L_patch_done;

  switch (new_bc) {
    case Bytecodes::_fast_aputfield:
    case Bytecodes::_fast_bputfield:
    case Bytecodes::_fast_cputfield:
    case Bytecodes::_fast_dputfield:
    case Bytecodes::_fast_fputfield:
    case Bytecodes::_fast_iputfield:
    case Bytecodes::_fast_lputfield:
    case Bytecodes::_fast_sputfield:
    {
      // We skip bytecode quickening for putfield instructions when
      // the put_code written to the constant pool cache is zero.
      // This is required so that every execution of this instruction
      // calls out to InterpreterRuntime::resolve_get_put to do
      // additional, required work.
      assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
      assert(load_bc_into_bc_reg, "we use bc_reg as temp");
      __ get_cache_and_index_at_bcp(Rtemp /* dst = cache */, 1);
      // ((*(cache+indices))>>((1+byte_no)*8))&0xFF:
#if defined(VM_LITTLE_ENDIAN)
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 1 + byte_no, Rtemp);
#else
      __ lbz(Rnew_bc, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (1 + byte_no), Rtemp);
#endif
      __ cmpwi(CCR0, Rnew_bc, 0);
      __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      __ beq(CCR0, L_patch_done);
      // __ isync(); // acquire not needed
      break;
    }

    default:
      assert(byte_no == -1, "sanity");
      if (load_bc_into_bc_reg) {
        __ li(Rnew_bc, (unsigned int)(unsigned char)new_bc);
      }
  }

  if (JvmtiExport::can_post_breakpoint()) {
    Label L_fast_patch;
    __ lbz(Rtemp, 0, R14_bcp);
    __ cmpwi(CCR0, Rtemp, (unsigned int)(unsigned char)Bytecodes::_breakpoint);
    __ bne(CCR0, L_fast_patch);
    // Perform the quickening, slowly, in the bowels of the breakpoint table.
    __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::set_original_bytecode_at), R19_method, R14_bcp, Rnew_bc);
    __ b(L_patch_done);
    __ bind(L_fast_patch);
  }

  // Patch bytecode.
  __ stb(Rnew_bc, 0, R14_bcp);

  __ bind(L_patch_done);
}
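
// Sketch of the quickening protocol implemented above (illustrative, not
// actual runtime code): a putfield is only rewritten once resolution has
// published a non-zero put_code into its constant pool cache entry.
//
//   u1 put_code = (cache_entry->indices() >> ((1 + byte_no) * 8)) & 0xFF;
//   if (put_code == 0) {
//     // Not resolved yet: keep the slow bytecode so the next execution
//     // calls InterpreterRuntime::resolve_get_put again.
//   } else {
//     *bcp = new_bc; // Patch to the _fast_xputfield variant.
//   }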

// ============================================================================
// Individual instructions

void TemplateTable::nop() {
  transition(vtos, vtos);
  // Nothing to do.
}

void TemplateTable::shouldnotreachhere() {
  transition(vtos, vtos);
  __ stop("shouldnotreachhere bytecode");
}

void TemplateTable::aconst_null() {
  transition(vtos, atos);
  __ li(R17_tos, 0);
}

void TemplateTable::iconst(int value) {
  transition(vtos, itos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::lconst(int value) {
  transition(vtos, ltos);
  assert(value >= -1 && value <= 5, "");
  __ li(R17_tos, value);
}

void TemplateTable::fconst(int value) {
  transition(vtos, ftos);
  static float zero = 0.0;
  static float one  = 1.0;
  static float two  = 2.0;
  switch (value) {
    default: ShouldNotReachHere();
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 2: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&two, R0, true);
      __ lfs(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
  }
}

void TemplateTable::dconst(int value) {
  transition(vtos, dtos);
  static double zero = 0.0;
  static double one  = 1.0;
  switch (value) {
    case 0: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&zero, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    case 1: {
      int simm16_offset = __ load_const_optimized(R11_scratch1, (address*)&one, R0, true);
      __ lfd(F15_ftos, simm16_offset, R11_scratch1);
      break;
    }
    default: ShouldNotReachHere();
  }
}

void TemplateTable::bipush() {
  transition(vtos, itos);
  __ lbz(R17_tos, 1, R14_bcp);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::sipush() {
  transition(vtos, itos);
  __ get_2_byte_integer_at_bcp(1, R17_tos, InterpreterMacroAssembler::Signed);
}

void TemplateTable::ldc(bool wide) {
  Register Rscratch1 = R11_scratch1,
           Rscratch2 = R12_scratch2,
           Rcpool    = R3_ARG1;

  transition(vtos, vtos);
  Label notInt, notClass, exit;

  __ get_cpool_and_tags(Rcpool, Rscratch2); // Set Rscratch2 = &tags.
  if (wide) { // Read index.
    __ get_2_byte_integer_at_bcp(1, Rscratch1, InterpreterMacroAssembler::Unsigned);
  } else {
    __ lbz(Rscratch1, 1, R14_bcp);
  }

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();

  // Get type from tags.
  __ addi(Rscratch2, Rscratch2, tags_offset);
  __ lbzx(Rscratch2, Rscratch2, Rscratch1);

  __ cmpwi(CCR0, Rscratch2, JVM_CONSTANT_UnresolvedClass); // Unresolved class?
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_UnresolvedClassInError); // Unresolved class in error state?
  __ cror(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);

  // Resolved class - need to call vm to get java mirror of the class.
  __ cmpwi(CCR1, Rscratch2, JVM_CONSTANT_Class);
  __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // Neither resolved class nor unresolved case from above?
  __ beq(CCR0, notClass);

  __ li(R4, wide ? 1 : 0);
  call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::ldc), R4);
  __ push(atos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notClass);
  __ addi(Rcpool, Rcpool, base_offset);
  __ sldi(Rscratch1, Rscratch1, LogBytesPerWord);
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Integer);
  __ bne(CCR0, notInt);
  __ isync(); // Order load of constant wrt. tags.
  __ lwax(R17_tos, Rcpool, Rscratch1);
  __ push(itos);
  __ b(exit);

  __ align(32, 12);
  __ bind(notInt);
#ifdef ASSERT
  // String and Object are rewritten to fast_aldc
  __ cmpdi(CCR0, Rscratch2, JVM_CONSTANT_Float);
  __ asm_assert_eq("unexpected type", 0x8765);
#endif
  __ isync(); // Order load of constant wrt. tags.
  __ lfsx(F15_ftos, Rcpool, Rscratch1);
  __ push(ftos);

  __ align(32, 12);
  __ bind(exit);
}
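
// The tag dispatch above mirrors this sketch (simplified; String and Object
// constants never reach here because ldc for them is rewritten to fast_aldc):
//
//   switch (tags[index]) {
//     case JVM_CONSTANT_Class:
//     case JVM_CONSTANT_UnresolvedClass:
//     case JVM_CONSTANT_UnresolvedClassInError:
//       push_a(InterpreterRuntime::ldc(wide)); // Resolve/get java mirror in the VM.
//       break;
//     case JVM_CONSTANT_Integer: push_i(cpool[index]); break;
//     case JVM_CONSTANT_Float:   push_f(cpool[index]); break;
//   }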

// Fast path for caching oop constants.
void TemplateTable::fast_aldc(bool wide) {
  transition(vtos, atos);

  int index_size = wide ? sizeof(u2) : sizeof(u1);
  const Register Rscratch = R11_scratch1;
  Label resolved;

  // We are resolved if the resolved reference cache entry contains a
  // non-null object (CallSite, etc.)
  __ get_cache_index_at_bcp(Rscratch, 1, index_size);  // Load index.
  __ load_resolved_reference_at_index(R17_tos, Rscratch);
  __ cmpdi(CCR0, R17_tos, 0);
  __ bne(CCR0, resolved);
  __ load_const_optimized(R3_ARG1, (int)bytecode());

  address entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_ldc);

  // First time invocation - must resolve first.
  __ call_VM(R17_tos, entry, R3_ARG1);

  __ align(32, 12);
  __ bind(resolved);
  __ verify_oop(R17_tos);
}

void TemplateTable::ldc2_w() {
  transition(vtos, vtos);
  Label Llong, Lexit;

  Register Rindex = R11_scratch1,
           Rcpool = R12_scratch2,
           Rtag   = R3_ARG1;
  __ get_cpool_and_tags(Rcpool, Rtag);
  __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);

  const int base_offset = ConstantPool::header_size() * wordSize;
  const int tags_offset = Array<u1>::base_offset_in_bytes();
  // Get type from tags.
  __ addi(Rcpool, Rcpool, base_offset);
  __ addi(Rtag, Rtag, tags_offset);

  __ lbzx(Rtag, Rtag, Rindex);

  __ sldi(Rindex, Rindex, LogBytesPerWord);
  __ cmpdi(CCR0, Rtag, JVM_CONSTANT_Double);
  __ bne(CCR0, Llong);
  // A double can be placed at word-aligned locations in the constant pool.
  // Check out Conversions.java for an example.
  // Also ConstantPool::header_size() is 20, which makes it very difficult
  // to double-align double on the constant pool. SG, 11/7/97
  __ isync(); // Order load of constant wrt. tags.
  __ lfdx(F15_ftos, Rcpool, Rindex);
  __ push(dtos);
  __ b(Lexit);

  __ bind(Llong);
  __ isync(); // Order load of constant wrt. tags.
  __ ldx(R17_tos, Rcpool, Rindex);
  __ push(ltos);

  __ bind(Lexit);
}

// Get the locals index located in the bytecode stream at bcp + offset.
void TemplateTable::locals_index(Register Rdst, int offset) {
  __ lbz(Rdst, offset, R14_bcp);
}

void TemplateTable::iload() {
  transition(vtos, itos);

  // Get the local value into tos
  const Register Rindex = R22_tmp2;
  locals_index(Rindex);

  // Rewrite iload,iload  pair into fast_iload2
  //         iload,caload pair into fast_icaload
  if (RewriteFrequentPairs) {
    Label Lrewrite, Ldone;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // get next byte
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_iload), R14_bcp);

    // If the next bytecode is _iload, wait to rewrite: we only want to
    // rewrite the last two iloads in a row. If it is _fast_iload (i.e. an
    // iload that was already quickened because it is not followed by an
    // iload or caload), then the current and next bytecode form an iload
    // pair and we rewrite to _fast_iload2.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_iload);
    __ beq(CCR0, Ldone);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_iload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload2);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_caload);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_icaload);
    __ beq(CCR0, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iload);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_iload, Rrewrite_to, Rscratch, false);
    __ bind(Ldone);
  }

  __ load_local_int(R17_tos, Rindex, Rindex);
}
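
// Rewriting decision above, as a sketch (a run of iloads is only rewritten
// at its last two members, so chains like iload,iload,iload quicken
// correctly on repeated execution):
//
//   next = code[bcp + length_of(_iload)];
//   if      (next == _iload)      ; // Wait: not the last iload of the run.
//   else if (next == _fast_iload) rewrite(_fast_iload2);
//   else if (next == _caload)     rewrite(_fast_icaload);
//   else                          rewrite(_fast_iload);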

// Load 2 integers in a row without dispatching
void TemplateTable::fast_iload2() {
  transition(vtos, itos);

  __ lbz(R3_ARG1, 1, R14_bcp);
  __ lbz(R17_tos, Bytecodes::length_for(Bytecodes::_iload) + 1, R14_bcp);

  __ load_local_int(R3_ARG1, R11_scratch1, R3_ARG1);
  __ load_local_int(R17_tos, R12_scratch2, R17_tos);
  __ push_i(R3_ARG1);
}

void TemplateTable::fast_iload() {
  transition(vtos, itos);
  // Get the local value into tos

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

// Load a local variable of type long from the locals area to the TOS cache register.
// The local index resides in the bytecode stream.
void TemplateTable::lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::locals_index_wide(Register Rdst) {
  // Offset is 2, not 1, because R14_bcp points to the wide prefix bytecode.
  __ get_2_byte_integer_at_bcp(2, Rdst, InterpreterMacroAssembler::Unsigned);
}

void TemplateTable::wide_iload() {
  // Get the local value into tos.

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_int(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_lload() {
  transition(vtos, ltos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_long(R17_tos, Rindex, Rindex);
}

void TemplateTable::wide_fload() {
  transition(vtos, ftos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_float(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_dload() {
  transition(vtos, dtos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_double(F15_ftos, Rindex, Rindex);
}

void TemplateTable::wide_aload() {
  transition(vtos, atos);

  const Register Rindex = R11_scratch1;
  locals_index_wide(Rindex);
  __ load_local_ptr(R17_tos, Rindex, Rindex);
}

void TemplateTable::iaload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lwa(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rload_addr);
}

void TemplateTable::laload() {
  transition(itos, ltos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ ld(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rload_addr);
}

void TemplateTable::faload() {
  transition(itos, ftos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerInt, Rtemp, Rload_addr);
  __ lfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rload_addr);
}

void TemplateTable::daload() {
  transition(itos, dtos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerLong, Rtemp, Rload_addr);
  __ lfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rload_addr);
}

void TemplateTable::aaload() {
  transition(itos, atos);

  // tos: index
  // result tos: element
  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, UseCompressedOops ? 2 : LogBytesPerWord, Rtemp, Rload_addr);
  __ load_heap_oop(R17_tos, arrayOopDesc::base_offset_in_bytes(T_OBJECT), Rload_addr);
  __ verify_oop(R17_tos);
  //__ dcbt(R17_tos); // prefetch
}

void TemplateTable::baload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, 0, Rtemp, Rload_addr);
  __ lbz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rload_addr);
  __ extsb(R17_tos, R17_tos);
}

void TemplateTable::caload() {
  transition(itos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R5_ARG3;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

// Iload followed by caload frequent pair.
void TemplateTable::fast_icaload() {
  transition(vtos, itos);

  const Register Rload_addr = R3_ARG1,
                 Rarray     = R4_ARG2,
                 Rtemp      = R11_scratch1;

  locals_index(R17_tos);
  __ load_local_int(R17_tos, Rtemp, R17_tos);
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lhz(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rload_addr);
}

void TemplateTable::saload() {
  transition(itos, itos);

  const Register Rload_addr = R11_scratch1,
                 Rarray     = R12_scratch2,
                 Rtemp      = R3_ARG1;
  __ index_check(Rarray, R17_tos /* index */, LogBytesPerShort, Rtemp, Rload_addr);
  __ lha(R17_tos, arrayOopDesc::base_offset_in_bytes(T_SHORT), Rload_addr);
}

void TemplateTable::iload(int n) {
  transition(vtos, itos);

  __ lwz(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lload(int n) {
  transition(vtos, ltos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fload(int n) {
  transition(vtos, ftos);

  __ lfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dload(int n) {
  transition(vtos, dtos);

  __ lfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::aload(int n) {
  transition(vtos, atos);

  __ ld(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::aload_0() {
  transition(vtos, atos);
  // According to bytecode histograms, the pairs:
  //
  // _aload_0, _fast_igetfield
  // _aload_0, _fast_agetfield
  // _aload_0, _fast_fgetfield
  //
  // occur frequently. If RewriteFrequentPairs is set, the (slow)
  // _aload_0 bytecode checks if the next bytecode is either
  // _fast_igetfield, _fast_agetfield or _fast_fgetfield and then
  // rewrites the current bytecode into a pair bytecode; otherwise it
  // rewrites the current bytecode into _fast_aload_0 that doesn't do
  // the pair check anymore.
  //
  // Note: If the next bytecode is _getfield, the rewrite must be
  //       delayed, otherwise we may miss an opportunity for a pair.
  //
  // Also rewrite frequent pairs
  //   aload_0, aload_1
  //   aload_0, iload_1
  // These bytecodes with a small amount of code are most profitable
  // to rewrite.

  if (RewriteFrequentPairs) {

    Label Lrewrite, Ldont_rewrite;
    Register Rnext_byte  = R3_ARG1,
             Rrewrite_to = R6_ARG4,
             Rscratch    = R11_scratch1;

    // Get next byte.
    __ lbz(Rnext_byte, Bytecodes::length_for(Bytecodes::_aload_0), R14_bcp);

    // If _getfield, wait to rewrite. We only want to rewrite the last two bytecodes in a pair.
    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_getfield);
    __ beq(CCR0, Ldont_rewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_igetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_iaccess_0);
    __ beq(CCR1, Lrewrite);

    __ cmpwi(CCR0, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_agetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aaccess_0);
    __ beq(CCR0, Lrewrite);

    __ cmpwi(CCR1, Rnext_byte, (unsigned int)(unsigned char)Bytecodes::_fast_fgetfield);
    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_faccess_0);
    __ beq(CCR1, Lrewrite);

    __ li(Rrewrite_to, (unsigned int)(unsigned char)Bytecodes::_fast_aload_0);

    __ bind(Lrewrite);
    patch_bytecode(Bytecodes::_aload_0, Rrewrite_to, Rscratch, false);
    __ bind(Ldont_rewrite);
  }

  // Do actual aload_0 (must do this after patch_bytecode which might call VM and GC might change oop).
  aload(0);
}

void TemplateTable::istore() {
  transition(itos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::lstore() {
  transition(ltos, vtos);
  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::fstore() {
  transition(ftos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::dstore() {
  transition(dtos, vtos);

  const Register Rindex = R11_scratch1;
  locals_index(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::wide_istore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_i();
  locals_index_wide(Rindex);
  __ store_local_int(R17_tos, Rindex);
}

void TemplateTable::wide_lstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_l();
  locals_index_wide(Rindex);
  __ store_local_long(R17_tos, Rindex);
}

void TemplateTable::wide_fstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_f();
  locals_index_wide(Rindex);
  __ store_local_float(F15_ftos, Rindex);
}

void TemplateTable::wide_dstore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_d();
  locals_index_wide(Rindex);
  __ store_local_double(F15_ftos, Rindex);
}

void TemplateTable::wide_astore() {
  transition(vtos, vtos);

  const Register Rindex = R11_scratch1;
  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, Rindex);
  locals_index_wide(Rindex);
  __ store_local_ptr(R17_tos, Rindex);
}

void TemplateTable::iastore() {
  transition(itos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stw(R17_tos, arrayOopDesc::base_offset_in_bytes(T_INT), Rstore_addr);
}

void TemplateTable::lastore() {
  transition(ltos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ std(R17_tos, arrayOopDesc::base_offset_in_bytes(T_LONG), Rstore_addr);
}

void TemplateTable::fastore() {
  transition(ftos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerInt, Rtemp, Rstore_addr);
  __ stfs(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_FLOAT), Rstore_addr);
}

void TemplateTable::dastore() {
  transition(dtos, vtos);

  const Register Rindex      = R3_ARG1,
                 Rstore_addr = R4_ARG2,
                 Rarray      = R5_ARG3,
                 Rtemp       = R6_ARG4;
  __ pop_i(Rindex);
  __ index_check(Rarray, Rindex, LogBytesPerLong, Rtemp, Rstore_addr);
  __ stfd(F15_ftos, arrayOopDesc::base_offset_in_bytes(T_DOUBLE), Rstore_addr);
}

// Pop 3 values from the stack (value, index, array) and store the value into the array.
void TemplateTable::aastore() {
  transition(vtos, vtos);

  Label Lstore_ok, Lis_null, Ldone;
  const Register Rindex    = R3_ARG1,
                 Rarray    = R4_ARG2,
                 Rscratch  = R11_scratch1,
                 Rscratch2 = R12_scratch2,
                 Rarray_klass = R5_ARG3,
                 Rarray_element_klass = Rarray_klass,
                 Rvalue_klass = R6_ARG4,
                 Rstore_addr = R31;    // Use register which survives VM call.

  __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp); // Get value to store.
  __ lwz(Rindex, Interpreter::expr_offset_in_bytes(1), R15_esp); // Get index.
  __ ld(Rarray, Interpreter::expr_offset_in_bytes(2), R15_esp);  // Get array.

  __ verify_oop(R17_tos);
  __ index_check_without_pop(Rarray, Rindex, UseCompressedOops ? 2 : LogBytesPerWord, Rscratch, Rstore_addr);
  // Rindex is dead!
  Register Rscratch3 = Rindex;

  // Do array store check - check for NULL value first.
  __ cmpdi(CCR0, R17_tos, 0);
  __ beq(CCR0, Lis_null);

  __ load_klass(Rarray_klass, Rarray);
  __ load_klass(Rvalue_klass, R17_tos);

  // Do fast instanceof cache test.
  __ ld(Rarray_element_klass, in_bytes(ObjArrayKlass::element_klass_offset()), Rarray_klass);

  // Generate a fast subtype check. Branch to store_ok if no failure. Throw if failure.
  __ gen_subtype_check(Rvalue_klass /*subklass*/, Rarray_element_klass /*superklass*/, Rscratch, Rscratch2, Rscratch3, Lstore_ok);

  // Fell through: subtype check failed => throw an exception.
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArrayStoreException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ bind(Lis_null);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), noreg /* 0 */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);
  __ profile_null_seen(Rscratch, Rscratch2);
  __ b(Ldone);

  // Store is OK.
  __ bind(Lstore_ok);
  do_oop_store(_masm, Rstore_addr, arrayOopDesc::base_offset_in_bytes(T_OBJECT), R17_tos /* value */,
               Rscratch, Rscratch2, Rscratch3, _bs->kind(), true /* precise */, false /* check_null */);

  __ bind(Ldone);
  // Adjust sp (pops array, index and value).
  __ addi(R15_esp, R15_esp, 3 * Interpreter::stackElementSize);
}
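
// Functionally, aastore implements the following (sketch; exception paths
// and GC barriers elided, the latter live in do_oop_store):
//
//   if (value == NULL) {
//     array[index] = NULL;   // No type check needed for null.
//   } else if (value->klass()->is_subtype_of(array->klass()->element_klass())) {
//     array[index] = value;
//   } else {
//     throw ArrayStoreException;
//   }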

void TemplateTable::bastore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, 0, Rscratch, Rarray);
  __ stb(R17_tos, arrayOopDesc::base_offset_in_bytes(T_BYTE), Rarray);
}

void TemplateTable::castore() {
  transition(itos, vtos);

  const Register Rindex   = R11_scratch1,
                 Rarray   = R12_scratch2,
                 Rscratch = R3_ARG1;
  __ pop_i(Rindex);
  // tos: val
  // Rarray: array ptr (popped by index_check)
  __ index_check(Rarray, Rindex, LogBytesPerShort, Rscratch, Rarray);
  __ sth(R17_tos, arrayOopDesc::base_offset_in_bytes(T_CHAR), Rarray);
}

void TemplateTable::sastore() {
  castore();
}

void TemplateTable::istore(int n) {
  transition(itos, vtos);
  __ stw(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::lstore(int n) {
  transition(ltos, vtos);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::fstore(int n) {
  transition(ftos, vtos);
  __ stfs(F15_ftos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::dstore(int n) {
  transition(dtos, vtos);
  __ stfd(F15_ftos, Interpreter::local_offset_in_bytes(n + 1), R18_locals);
}

void TemplateTable::astore(int n) {
  transition(vtos, vtos);

  __ pop_ptr();
  __ verify_oop_or_return_address(R17_tos, R11_scratch1);
  __ std(R17_tos, Interpreter::local_offset_in_bytes(n), R18_locals);
}

void TemplateTable::pop() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize);
}

void TemplateTable::pop2() {
  transition(vtos, vtos);

  __ addi(R15_esp, R15_esp, Interpreter::stackElementSize * 2);
}

void TemplateTable::dup() {
  transition(vtos, vtos);

  __ ld(R11_scratch1, Interpreter::stackElementSize, R15_esp);
  __ push_ptr(R11_scratch1);
}

void TemplateTable::dup_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ push_ptr(Rb);
  // stack: ..., b, a, b
}

void TemplateTable::dup_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;

  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp);  // load c
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);  // load a
  __ std(Rc, Interpreter::stackElementSize * 3, R15_esp); // store c in a
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);  // load b
  // stack: ..., c, b, c
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp); // store a in b
  // stack: ..., c, a, c
  __ std(Rb, Interpreter::stackElementSize,     R15_esp); // store b in c
  __ push_ptr(Rc);                                        // push c
  // stack: ..., c, a, b, c
}

void TemplateTable::dup2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ push_2ptrs(Ra, Rb);
  // stack: ..., a, b, a, b
}

void TemplateTable::dup2_x1() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1;
  // stack: ..., a, b, c
  __ ld(Rc, Interpreter::stackElementSize,     R15_esp);
  __ ld(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 3, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 3, R15_esp);
  // stack: ..., b, c, a
  __ push_2ptrs(Rb, Rc);
  // stack: ..., b, c, a, b, c
}

void TemplateTable::dup2_x2() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2,
           Rc = R3_ARG1,
           Rd = R4_ARG2;
  // stack: ..., a, b, c, d
  __ ld(Rb, Interpreter::stackElementSize * 3, R15_esp);
  __ ld(Rd, Interpreter::stackElementSize,     R15_esp);
  __ std(Rb, Interpreter::stackElementSize,     R15_esp);  // store b in d
  __ std(Rd, Interpreter::stackElementSize * 3, R15_esp);  // store d in b
  __ ld(Ra, Interpreter::stackElementSize * 4, R15_esp);
  __ ld(Rc, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize * 2, R15_esp);  // store a in c
  __ std(Rc, Interpreter::stackElementSize * 4, R15_esp);  // store c in a
  // stack: ..., c, d, a, b
  __ push_2ptrs(Rc, Rd);
  // stack: ..., c, d, a, b, c, d
}

void TemplateTable::swap() {
  transition(vtos, vtos);

  Register Ra = R11_scratch1,
           Rb = R12_scratch2;
  // stack: ..., a, b
  __ ld(Rb, Interpreter::stackElementSize,     R15_esp);
  __ ld(Ra, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Rb, Interpreter::stackElementSize * 2, R15_esp);
  __ std(Ra, Interpreter::stackElementSize,     R15_esp);
  // stack: ..., b, a
}

void TemplateTable::iop2(Operation op) {
  transition(itos, itos);

  Register Rscratch = R11_scratch1;

  __ pop_i(Rscratch);
  // For the shift operations: tos = shift count, Rscratch = value to shift.
  // For the other operations, Rscratch and tos are simply the two operands.
  switch (op) {
    case  add:   __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:   __ sub(R17_tos, Rscratch, R17_tos); break;
    case  mul:   __ mullw(R17_tos, Rscratch, R17_tos); break;
    case  _and:  __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:   __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor:  __ xorr(R17_tos, Rscratch, R17_tos); break;
    case  shl:   __ rldicl(R17_tos, R17_tos, 0, 64-5); __ slw(R17_tos, Rscratch, R17_tos); break;
    case  shr:   __ rldicl(R17_tos, R17_tos, 0, 64-5); __ sraw(R17_tos, Rscratch, R17_tos); break;
    case  ushr:  __ rldicl(R17_tos, R17_tos, 0, 64-5); __ srw(R17_tos, Rscratch, R17_tos); break;
    default:     ShouldNotReachHere();
  }
}
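
// Note on the rldicl masking above: per the JVM specification, int shifts
// use only the low 5 bits of the shift count (long shifts use the low 6
// bits, see lshl/lshr/lushr below). Conceptually:
//
//   int ishl(int value, int count) { return value << (count & 0x1f); }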

void TemplateTable::lop2(Operation op) {
  transition(ltos, ltos);

  Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch);
  switch (op) {
    case  add:   __ add(R17_tos, Rscratch, R17_tos); break;
    case  sub:   __ sub(R17_tos, Rscratch, R17_tos); break;
    case  _and:  __ andr(R17_tos, Rscratch, R17_tos); break;
    case  _or:   __ orr(R17_tos, Rscratch, R17_tos); break;
    case  _xor:  __ xorr(R17_tos, Rscratch, R17_tos); break;
    default:     ShouldNotReachHere();
  }
}

void TemplateTable::idiv() {
  transition(itos, itos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by irem.

  __ addi(R0, R17_tos, 1);
  __ cmplwi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpwi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_i(Rdividend);
  __ mullw(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_i(Rdividend);
  __ divw(R17_tos, Rdividend, R17_tos); // Can't divide minint/-1.
  __ bind(Ldone);
}
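
// Divisors -1, 0 and 1 are special-cased above because PPC's divw leaves
// the result undefined for min_int / -1 and there is no divide-by-zero
// hardware trap to reuse. Conceptually:
//
//   int idiv(int dividend, int divisor) {
//     if (divisor == 0) throw ArithmeticException;
//     if (divisor == 1 || divisor == -1) return dividend * divisor; // No divw needed.
//     return dividend / divisor;
//   }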

void TemplateTable::irem() {
  transition(itos, itos);

  __ mr(R12_scratch2, R17_tos);
  idiv();
  __ mullw(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by idiv.
}

void TemplateTable::lmul() {
  transition(ltos, ltos);

  __ pop_l(R11_scratch1);
  __ mulld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::ldiv() {
  transition(ltos, ltos);

  Label Lnormal, Lexception, Ldone;
  Register Rdividend = R11_scratch1; // Used by lrem.

  __ addi(R0, R17_tos, 1);
  __ cmpldi(CCR0, R0, 2);
  __ bgt(CCR0, Lnormal); // divisor < -1 or > 1

  __ cmpdi(CCR1, R17_tos, 0);
  __ beq(CCR1, Lexception); // divisor == 0

  __ pop_l(Rdividend);
  __ mulld(R17_tos, Rdividend, R17_tos); // div by +/-1
  __ b(Ldone);

  __ bind(Lexception);
  __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ArithmeticException_entry);
  __ mtctr(R11_scratch1);
  __ bctr();

  __ align(32, 12);
  __ bind(Lnormal);
  __ pop_l(Rdividend);
  __ divd(R17_tos, Rdividend, R17_tos); // Can't divide minlong/-1.
  __ bind(Ldone);
}

void TemplateTable::lrem() {
  transition(ltos, ltos);

  __ mr(R12_scratch2, R17_tos);
  ldiv();
  __ mulld(R17_tos, R17_tos, R12_scratch2);
  __ subf(R17_tos, R17_tos, R11_scratch1); // Dividend set by ldiv.
}

void TemplateTable::lshl() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ sld(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lshr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srad(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::lushr() {
  transition(itos, ltos);

  __ rldicl(R17_tos, R17_tos, 0, 64-6); // Extract least significant bits.
  __ pop_l(R11_scratch1);
  __ srd(R17_tos, R11_scratch1, R17_tos);
}

void TemplateTable::fop2(Operation op) {
  transition(ftos, ftos);

  switch (op) {
    case add: __ pop_f(F0_SCRATCH); __ fadds(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_f(F0_SCRATCH); __ fsubs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_f(F0_SCRATCH); __ fmuls(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_f(F0_SCRATCH); __ fdivs(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_f(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::frem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

void TemplateTable::dop2(Operation op) {
  transition(dtos, dtos);

  switch (op) {
    case add: __ pop_d(F0_SCRATCH); __ fadd(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case sub: __ pop_d(F0_SCRATCH); __ fsub(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case mul: __ pop_d(F0_SCRATCH); __ fmul(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case div: __ pop_d(F0_SCRATCH); __ fdiv(F15_ftos, F0_SCRATCH, F15_ftos); break;
    case rem:
      __ pop_d(F1_ARG1);
      __ fmr(F2_ARG2, F15_ftos);
      __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::drem));
      __ fmr(F15_ftos, F1_RET);
      break;

    default: ShouldNotReachHere();
  }
}

// Negate the value in the TOS cache.
void TemplateTable::ineg() {
  transition(itos, itos);

  __ neg(R17_tos, R17_tos);
}

// Negate the value in the TOS cache.
void TemplateTable::lneg() {
  transition(ltos, ltos);

  __ neg(R17_tos, R17_tos);
}

void TemplateTable::fneg() {
  transition(ftos, ftos);

  __ fneg(F15_ftos, F15_ftos);
}

void TemplateTable::dneg() {
  transition(dtos, dtos);

  __ fneg(F15_ftos, F15_ftos);
}

// Increments a local variable in place.
void TemplateTable::iinc() {
  transition(vtos, vtos);

  const Register Rindex     = R11_scratch1,
                 Rincrement = R0,
                 Rvalue     = R12_scratch2;

  locals_index(Rindex);              // Load locals index from bytecode stream.
  __ lbz(Rincrement, 2, R14_bcp);    // Load increment from the bytecode stream.
  __ extsb(Rincrement, Rincrement);

  __ load_local_int(Rvalue, Rindex, Rindex); // Puts address of local into Rindex.

  __ add(Rvalue, Rincrement, Rvalue);
  __ stw(Rvalue, 0, Rindex);
}

void TemplateTable::wide_iinc() {
  transition(vtos, vtos);

  Register Rindex       = R11_scratch1,
           Rlocals_addr = Rindex,
           Rincr        = R12_scratch2;
  locals_index_wide(Rindex);
  __ get_2_byte_integer_at_bcp(4, Rincr, InterpreterMacroAssembler::Signed);
  __ load_local_int(R17_tos, Rlocals_addr, Rindex);
  __ add(R17_tos, Rincr, R17_tos);
  __ stw(R17_tos, 0, Rlocals_addr);
}

void TemplateTable::convert() {
  // %%%%% Factor this first part across platforms
#ifdef ASSERT
  TosState tos_in  = ilgl;
  TosState tos_out = ilgl;
  switch (bytecode()) {
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_in = itos; break;
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_l2d: tos_in = ltos; break;
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_f2d: tos_in = ftos; break;
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_d2l: // fall through
    case Bytecodes::_d2f: tos_in = dtos; break;
    default             : ShouldNotReachHere();
  }
  switch (bytecode()) {
    case Bytecodes::_l2i: // fall through
    case Bytecodes::_f2i: // fall through
    case Bytecodes::_d2i: // fall through
    case Bytecodes::_i2b: // fall through
    case Bytecodes::_i2c: // fall through
    case Bytecodes::_i2s: tos_out = itos; break;
    case Bytecodes::_i2l: // fall through
    case Bytecodes::_f2l: // fall through
    case Bytecodes::_d2l: tos_out = ltos; break;
    case Bytecodes::_i2f: // fall through
    case Bytecodes::_l2f: // fall through
    case Bytecodes::_d2f: tos_out = ftos; break;
    case Bytecodes::_i2d: // fall through
    case Bytecodes::_l2d: // fall through
    case Bytecodes::_f2d: tos_out = dtos; break;
    default             : ShouldNotReachHere();
  }
  transition(tos_in, tos_out);
#endif

  // Conversion
  Label done;
  switch (bytecode()) {
    case Bytecodes::_i2l:
      __ extsw(R17_tos, R17_tos);
      break;

    case Bytecodes::_l2i:
      // Nothing to do, we'll continue to work with the lower bits.
      break;

    case Bytecodes::_i2b:
      __ extsb(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2c:
      __ rldicl(R17_tos, R17_tos, 0, 64-2*8);
      break;

    case Bytecodes::_i2s:
      __ extsh(R17_tos, R17_tos);
      break;

    case Bytecodes::_i2d:
      __ extsw(R17_tos, R17_tos);
      // Fall through to _l2d: after sign extension the int is a valid long.
    case Bytecodes::_l2d:
      __ push_l_pop_d();
      __ fcfid(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_i2f:
      __ extsw(R17_tos, R17_tos);
      __ push_l_pop_d();
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        // Comment: alternatively, load with sign extend could be done by lfiwax.
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        __ fcfid(F15_ftos, F15_ftos);
        __ frsp(F15_ftos, F15_ftos);
      }
      break;

    case Bytecodes::_l2f:
      if (VM_Version::has_fcfids()) { // fcfids is >= Power7 only
        __ push_l_pop_d();
        __ fcfids(F15_ftos, F15_ftos);
      } else {
        // Avoid rounding problem when result should be 0x3f800001: need fixup code before fcfid+frsp.
        __ mr(R3_ARG1, R17_tos);
        __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::l2f));
        __ fmr(F15_ftos, F1_RET);
      }
      break;

    case Bytecodes::_f2d:
      // empty
      break;

    case Bytecodes::_d2f:
      __ frsp(F15_ftos, F15_ftos);
      break;

    case Bytecodes::_d2i:
    case Bytecodes::_f2i:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctiwz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    case Bytecodes::_d2l:
    case Bytecodes::_f2l:
      __ fcmpu(CCR0, F15_ftos, F15_ftos);
      __ li(R17_tos, 0); // 0 in case of NAN
      __ bso(CCR0, done);
      __ fctidz(F15_ftos, F15_ftos);
      __ push_d_pop_l();
      break;

    default: ShouldNotReachHere();
  }
  __ bind(done);
}
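
// The d2i/f2i/d2l/f2l cases above implement the NaN handling Java requires
// (sketch):
//
//   long d2l(double d) {
//     if (d != d) return 0;  // NaN compares unordered with itself (fcmpu sets SO).
//     return (long)d;        // fctidz truncates; on overflow it saturates to
//   }                        //   min/max, matching JVM semantics.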

// Long compare
void TemplateTable::lcmp() {
  transition(ltos, itos);

  const Register Rscratch = R11_scratch1;
  __ pop_l(Rscratch); // first operand, deeper in stack

  __ cmpd(CCR0, Rscratch, R17_tos); // compare
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
}
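
// The branch-free trick above, spelled out (sketch): after cmpd, CR0 holds
// [lt, gt, eq, so] in the most significant bits of the 32-bit CR image.
//
//   uint32_t cr  = mfcr();            // lt at bit 31, gt at bit 30 (LSB = bit 0).
//   uint32_t hi2 = cr >> 30;          // srwi:  (lt << 1) | gt
//   int32_t  sgn = (int32_t)cr >> 31; // srawi: lt ? -1 : 0
//   result = sgn | hi2;               // <: -1 (sign wins), ==: 0, >: 1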

// fcmpl/fcmpg and dcmpl/dcmpg bytecodes
// unordered_result == -1 => fcmpl or dcmpl
// unordered_result ==  1 => fcmpg or dcmpg
void TemplateTable::float_cmp(bool is_float, int unordered_result) {
  const FloatRegister Rfirst  = F0_SCRATCH,
                      Rsecond = F15_ftos;
  const Register Rscratch = R11_scratch1;

  if (is_float) {
    __ pop_f(Rfirst);
  } else {
    __ pop_d(Rfirst);
  }

  Label Lunordered, Ldone;
  __ fcmpu(CCR0, Rfirst, Rsecond); // compare
  if (unordered_result) {
    __ bso(CCR0, Lunordered);
  }
  __ mfcr(R17_tos); // set bit 32..33 as follows: <: 0b10, =: 0b00, >: 0b01
  __ srwi(Rscratch, R17_tos, 30);
  __ srawi(R17_tos, R17_tos, 31);
  __ orr(R17_tos, Rscratch, R17_tos); // set result as follows: <: -1, =: 0, >: 1
  if (unordered_result) {
    __ b(Ldone);
    __ bind(Lunordered);
    __ load_const_optimized(R17_tos, unordered_result);
  }
  __ bind(Ldone);
}
// Branch_conditional which takes TemplateTable::Condition.
void TemplateTable::branch_conditional(ConditionRegister crx, TemplateTable::Condition cc, Label& L, bool invert) {
  bool positive = false;
  Assembler::Condition cond = Assembler::equal;
  switch (cc) {
    case TemplateTable::equal:         positive = true ; cond = Assembler::equal  ; break;
    case TemplateTable::not_equal:     positive = false; cond = Assembler::equal  ; break;
    case TemplateTable::less:          positive = true ; cond = Assembler::less   ; break;
    case TemplateTable::less_equal:    positive = false; cond = Assembler::greater; break;
    case TemplateTable::greater:       positive = true ; cond = Assembler::greater; break;
    case TemplateTable::greater_equal: positive = false; cond = Assembler::less   ; break;
    default: ShouldNotReachHere();
  }
  int bo = (positive != invert) ? Assembler::bcondCRbiIs1 : Assembler::bcondCRbiIs0;
  int bi = Assembler::bi0(crx, cond);
  __ bc(bo, bi, L);
}
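
// The 'positive' flag says whether the CR bit is tested for 1 or for 0, so
// conditions without a dedicated CR bit are expressed via their complement:
//
//   not_equal     == !(equal)
//   less_equal    == !(greater)
//   greater_equal == !(less)
//
// 'positive != invert' then XORs in the caller-requested inversion.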

void TemplateTable::branch(bool is_jsr, bool is_wide) {

  // Note: on SPARC, we use InterpreterMacroAssembler::if_cmp also.
  __ verify_thread();

  const Register Rscratch1    = R11_scratch1,
                 Rscratch2    = R12_scratch2,
                 Rscratch3    = R3_ARG1,
                 R4_counters  = R4_ARG2,
                 bumped_count = R31,
                 Rdisp        = R22_tmp2;

  __ profile_taken_branch(Rscratch1, bumped_count);

  // Get (wide) offset.
  if (is_wide) {
    __ get_4_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  } else {
    __ get_2_byte_integer_at_bcp(1, Rdisp, InterpreterMacroAssembler::Signed);
  }

  // --------------------------------------------------------------------------
  // Handle all the JSR stuff here, then exit.
  // It's much shorter and cleaner than intermingling with the
  // non-JSR normal-branch stuff occurring below.
  if (is_jsr) {
    // Compute return address as bci in R17_tos.
1603     __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1604     __ addi(Rscratch2, R14_bcp, -in_bytes(ConstMethod::codes_offset()) + (is_wide ? 5 : 3));
1605     __ subf(R17_tos, Rscratch1, Rscratch2);
1606 
1607     // Bump bcp to target of JSR.
1608     __ add(R14_bcp, Rdisp, R14_bcp);
1609     // Push returnAddress for "ret" on stack.
1610     __ push_ptr(R17_tos);
1611     // And away we go!
1612     __ dispatch_next(vtos);
1613     return;
1614   }
1615 
1616   // --------------------------------------------------------------------------
1617   // Normal (non-jsr) branch handling
1618 
1619   const bool increment_invocation_counter_for_backward_branches = UseCompiler && UseLoopCounter;
1620   if (increment_invocation_counter_for_backward_branches) {
1622 
1623     Label Lforward;
1624     __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
1625 
1626     // Check branch direction.
1627     __ cmpdi(CCR0, Rdisp, 0);
1628     __ bgt(CCR0, Lforward);
1629 
1630     __ get_method_counters(R19_method, R4_counters, Lforward);
1631 
1632     if (TieredCompilation) {
1633       Label Lno_mdo, Loverflow;
1634       const int increment = InvocationCounter::count_increment;
1635       const int mask = ((1 << Tier0BackedgeNotifyFreqLog) - 1) << InvocationCounter::count_shift;
1636       if (ProfileInterpreter) {
1637         Register Rmdo = Rscratch1;
1638 
1639         // If no method data exists, go to profile_continue.
1640         __ ld(Rmdo, in_bytes(Method::method_data_offset()), R19_method);
1641         __ cmpdi(CCR0, Rmdo, 0);
1642         __ beq(CCR0, Lno_mdo);
1643 
1644         // Increment backedge counter in the MDO.
1645         const int mdo_bc_offs = in_bytes(MethodData::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
1646         __ lwz(Rscratch2, mdo_bc_offs, Rmdo);
1647         __ load_const_optimized(Rscratch3, mask, R0);
1648         __ addi(Rscratch2, Rscratch2, increment);
1649         __ stw(Rscratch2, mdo_bc_offs, Rmdo);
1650         __ and_(Rscratch3, Rscratch2, Rscratch3);
1651         __ bne(CCR0, Lforward);
1652         __ b(Loverflow);
1653       }
1654 
1655       // If there's no MDO, increment counter in method.
1656       const int mo_bc_offs = in_bytes(MethodCounters::backedge_counter_offset()) + in_bytes(InvocationCounter::counter_offset());
1657       __ bind(Lno_mdo);
1658       __ lwz(Rscratch2, mo_bc_offs, R4_counters);
1659       __ load_const_optimized(Rscratch3, mask, R0);
1660       __ addi(Rscratch2, Rscratch2, increment);
      __ stw(Rscratch2, mo_bc_offs, R4_counters);
1662       __ and_(Rscratch3, Rscratch2, Rscratch3);
1663       __ bne(CCR0, Lforward);
1664 
1665       __ bind(Loverflow);
1666 
1667       // Notify point for loop, pass branch bytecode.
1668       __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), R14_bcp, true);
1669 
1670       // Was an OSR adapter generated?
      // R3_RET = osr nmethod
1672       __ cmpdi(CCR0, R3_RET, 0);
1673       __ beq(CCR0, Lforward);
1674 
1675       // Has the nmethod been invalidated already?
1676       __ lwz(R0, nmethod::entry_bci_offset(), R3_RET);
1677       __ cmpwi(CCR0, R0, InvalidOSREntryBci);
1678       __ beq(CCR0, Lforward);
1679 
1680       // Migrate the interpreter frame off of the stack.
1681       // We can use all registers because we will not return to interpreter from this point.
1682 
1683       // Save nmethod.
1684       const Register osr_nmethod = R31;
1685       __ mr(osr_nmethod, R3_RET);
1686       __ set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
1687       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
1688       __ reset_last_Java_frame();
1689       // OSR buffer is in ARG1.
1690 
1691       // Remove the interpreter frame.
1692       __ merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
1693 
1694       // Jump to the osr code.
1695       __ ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
1696       __ mtlr(R0);
1697       __ mtctr(R11_scratch1);
1698       __ bctr();
1699 
1700     } else {
1701 
1702       const Register invoke_ctr = Rscratch1;
1703       // Update Backedge branch separately from invocations.
1704       __ increment_backedge_counter(R4_counters, invoke_ctr, Rscratch2, Rscratch3);
1705 
1706       if (ProfileInterpreter) {
1707         __ test_invocation_counter_for_mdp(invoke_ctr, Rscratch2, Lforward);
1708         if (UseOnStackReplacement) {
1709           __ test_backedge_count_for_osr(bumped_count, R14_bcp, Rscratch2);
1710         }
1711       } else {
1712         if (UseOnStackReplacement) {
1713           __ test_backedge_count_for_osr(invoke_ctr, R14_bcp, Rscratch2);
1714         }
1715       }
1716     }
1717 
1718     __ bind(Lforward);
1719 
1720   } else {
1721     // Bump bytecode pointer by displacement (take the branch).
1722     __ add(R14_bcp, Rdisp, R14_bcp); // Add to bc addr.
1723   }
1724   // Continue with bytecode @ target.
1725   // %%%%% Like Intel, could speed things up by moving bytecode fetch to code above,
1726   // %%%%% and changing dispatch_next to dispatch_only.
1727   __ dispatch_next(vtos);
1728 }
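
// Control flow of the backedge-counting path above as a C sketch
// (TieredCompilation case; illustrative only, helper names are hypothetical):
//
//   bcp += disp;
//   if (disp > 0) goto forward;                      // Only backedges are counted.
//   int* ctr = has_mdo ? &mdo_backedge_counter : &method_counters_backedge_counter;
//   *ctr += InvocationCounter::count_increment;
//   if (*ctr & mask) goto forward;                   // Notification frequency not reached.
//   nmethod* osr = frequency_counter_overflow(bcp);  // May trigger an OSR compilation.
//   if (osr != NULL && osr->entry_bci != InvalidOSREntryBci) {
//     // Migrate the interpreter frame off the stack and jump into compiled code.
//   }
//   forward: dispatch_next();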
1729 
1730 // Helper function for if_cmp* methods below.
1731 // Factored out common compare and branch code.
1732 void TemplateTable::if_cmp_common(Register Rfirst, Register Rsecond, Register Rscratch1, Register Rscratch2, Condition cc, bool is_jint, bool cmp0) {
1733   Label Lnot_taken;
  // Note: cc is the condition under which the bytecode branch is *taken*.
  // Since we branch away to Lnot_taken, the CC must be inverted here.
1736 
1737   if (is_jint) {
1738     if (cmp0) {
1739       __ cmpwi(CCR0, Rfirst, 0);
1740     } else {
1741       __ cmpw(CCR0, Rfirst, Rsecond);
1742     }
1743   } else {
1744     if (cmp0) {
1745       __ cmpdi(CCR0, Rfirst, 0);
1746     } else {
1747       __ cmpd(CCR0, Rfirst, Rsecond);
1748     }
1749   }
1750   branch_conditional(CCR0, cc, Lnot_taken, /*invert*/ true);
1751 
  // Condition is true => take the branch!
1753   branch(false, false);
1754 
  // Condition is false => continue with the next bytecode.
1756   __ align(32, 12);
1757   __ bind(Lnot_taken);
1758   __ profile_not_taken_branch(Rscratch1, Rscratch2);
1759 }
1760 
// Compare integer value with zero; take the bytecode branch if CC holds,
// otherwise continue with the next bytecode.
1762 void TemplateTable::if_0cmp(Condition cc) {
1763   transition(itos, vtos);
1764 
1765   if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, true, true);
1766 }
1767 
// Compare two integer values; take the bytecode branch if CC holds,
// otherwise continue with the next bytecode.
1769 //
1770 // Interface:
1771 //  - Rfirst: First operand  (older stack value)
1772 //  - tos:    Second operand (younger stack value)
1773 void TemplateTable::if_icmp(Condition cc) {
1774   transition(itos, vtos);
1775 
1776   const Register Rfirst  = R0,
1777                  Rsecond = R17_tos;
1778 
1779   __ pop_i(Rfirst);
1780   if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, true, false);
1781 }
1782 
1783 void TemplateTable::if_nullcmp(Condition cc) {
1784   transition(atos, vtos);
1785 
1786   if_cmp_common(R17_tos, noreg, R11_scratch1, R12_scratch2, cc, false, true);
1787 }
1788 
1789 void TemplateTable::if_acmp(Condition cc) {
1790   transition(atos, vtos);
1791 
1792   const Register Rfirst  = R0,
1793                  Rsecond = R17_tos;
1794 
1795   __ pop_ptr(Rfirst);
1796   if_cmp_common(Rfirst, Rsecond, R11_scratch1, R12_scratch2, cc, false, false);
1797 }
1798 
1799 void TemplateTable::ret() {
1800   locals_index(R11_scratch1);
1801   __ load_local_ptr(R17_tos, R11_scratch1, R11_scratch1);
1802 
1803   __ profile_ret(vtos, R17_tos, R11_scratch1, R12_scratch2);
1804 
1805   __ ld(R11_scratch1, in_bytes(Method::const_offset()), R19_method);
1806   __ add(R11_scratch1, R17_tos, R11_scratch1);
1807   __ addi(R14_bcp, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
1808   __ dispatch_next(vtos);
1809 }
1810 
1811 void TemplateTable::wide_ret() {
1812   transition(vtos, vtos);
1813 
1814   const Register Rindex = R3_ARG1,
1815                  Rscratch1 = R11_scratch1,
1816                  Rscratch2 = R12_scratch2;
1817 
1818   locals_index_wide(Rindex);
1819   __ load_local_ptr(R17_tos, R17_tos, Rindex);
  __ profile_ret(vtos, R17_tos, Rscratch1, Rscratch2);
1821   // Tos now contains the bci, compute the bcp from that.
1822   __ ld(Rscratch1, in_bytes(Method::const_offset()), R19_method);
1823   __ addi(Rscratch2, R17_tos, in_bytes(ConstMethod::codes_offset()));
1824   __ add(R14_bcp, Rscratch1, Rscratch2);
1825   __ dispatch_next(vtos);
1826 }
1827 
1828 void TemplateTable::tableswitch() {
1829   transition(itos, vtos);
1830 
1831   Label Ldispatch, Ldefault_case;
1832   Register Rlow_byte         = R3_ARG1,
1833            Rindex            = Rlow_byte,
1834            Rhigh_byte        = R4_ARG2,
1835            Rdef_offset_addr  = R5_ARG3, // is going to contain address of default offset
1836            Rscratch1         = R11_scratch1,
1837            Rscratch2         = R12_scratch2,
1838            Roffset           = R6_ARG4;
1839 
1840   // Align bcp.
1841   __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1842   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
1843 
1844   // Load lo & hi.
1845   __ get_u4(Rlow_byte, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
  __ get_u4(Rhigh_byte, Rdef_offset_addr, 2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
1847 
1848   // Check for default case (=index outside [low,high]).
1849   __ cmpw(CCR0, R17_tos, Rlow_byte);
1850   __ cmpw(CCR1, R17_tos, Rhigh_byte);
1851   __ blt(CCR0, Ldefault_case);
1852   __ bgt(CCR1, Ldefault_case);
1853 
1854   // Lookup dispatch offset.
1855   __ sub(Rindex, R17_tos, Rlow_byte);
1856   __ extsw(Rindex, Rindex);
1857   __ profile_switch_case(Rindex, Rhigh_byte /* scratch */, Rscratch1, Rscratch2);
1858   __ sldi(Rindex, Rindex, LogBytesPerInt);
1859   __ addi(Rindex, Rindex, 3 * BytesPerInt);
1860 #if defined(VM_LITTLE_ENDIAN)
1861   __ lwbrx(Roffset, Rdef_offset_addr, Rindex);
1862   __ extsw(Roffset, Roffset);
1863 #else
1864   __ lwax(Roffset, Rdef_offset_addr, Rindex);
1865 #endif
1866   __ b(Ldispatch);
1867 
1868   __ bind(Ldefault_case);
1869   __ profile_switch_default(Rhigh_byte, Rscratch1);
1870   __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
1871 
1872   __ bind(Ldispatch);
1873 
1874   __ add(R14_bcp, Roffset, R14_bcp);
1875   __ dispatch_next(vtos);
1876 }
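
// What the generated code computes, as a C sketch (illustrative only;
// align_down is a hypothetical helper):
//
//   int32_t* table = align_down(bcp + BytesPerInt, BytesPerInt);
//   // table[0] = default offset, table[1] = low, table[2] = high,
//   // table[3 + k] = offset for key low + k. All entries are big-endian,
//   // hence the lwbrx on little-endian machines.
//   int32_t low = table[1], high = table[2];
//   int32_t off = (key < low || key > high) ? table[0] : table[3 + (key - low)];
//   bcp += off;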
1877 
1878 void TemplateTable::lookupswitch() {
1879   transition(itos, itos);
1880   __ stop("lookupswitch bytecode should have been rewritten");
1881 }
1882 
// Lookup switch implemented as a linear search through the value/offset pairs.
1884 // Bytecode stream format:
1885 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
1886 // Note: Everything is big-endian format here.
1887 void TemplateTable::fast_linearswitch() {
1888   transition(itos, vtos);
1889 
1890   Label Lloop_entry, Lsearch_loop, Lfound, Lcontinue_execution, Ldefault_case;
1891 
1892   Register Rcount           = R3_ARG1,
1893            Rcurrent_pair    = R4_ARG2,
1894            Rdef_offset_addr = R5_ARG3, // Is going to contain address of default offset.
1895            Roffset          = R31,     // Might need to survive C call.
1896            Rvalue           = R12_scratch2,
1897            Rscratch         = R11_scratch1,
1898            Rcmp_value       = R17_tos;
1899 
1900   // Align bcp.
1901   __ addi(Rdef_offset_addr, R14_bcp, BytesPerInt);
1902   __ clrrdi(Rdef_offset_addr, Rdef_offset_addr, log2_long((jlong)BytesPerInt));
1903 
1904   // Setup loop counter and limit.
1905   __ get_u4(Rcount, Rdef_offset_addr, BytesPerInt, InterpreterMacroAssembler::Unsigned);
1906   __ addi(Rcurrent_pair, Rdef_offset_addr, 2 * BytesPerInt); // Rcurrent_pair now points to first pair.
1907 
1908   // Set up search loop.
1909   __ cmpwi(CCR0, Rcount, 0);
1910   __ beq(CCR0, Ldefault_case);
1911 
1912   __ mtctr(Rcount);
1913 
1914   // linear table search
1915   __ bind(Lsearch_loop);
1916 
1917   // TODO(asmundak): there is no need to fetch bytecode offset immediately,
1918   // do it only when we have found the matching value.
1919   __ get_u4(Rvalue, Rcurrent_pair, 0, InterpreterMacroAssembler::Unsigned);
1920   __ get_u4(Roffset, Rcurrent_pair, BytesPerInt, InterpreterMacroAssembler::Signed);
1921 
1922   __ cmpw(CCR0, Rvalue, Rcmp_value);
1923   __ beq(CCR0, Lfound);
1924 
1925   __ addi(Rcurrent_pair, Rcurrent_pair, 2 * BytesPerInt);
1926   __ bdnz(Lsearch_loop);
1927 
1928   // default case
1929   __ bind(Ldefault_case);
1930 
  __ get_u4(Roffset, Rdef_offset_addr, 0, InterpreterMacroAssembler::Signed);
1932   if (ProfileInterpreter) {
1933     __ profile_switch_default(Rdef_offset_addr, Rcount/* scratch */);
1934     __ b(Lcontinue_execution);
1935   }
1936 
1937   // Entry found, skip Roffset bytecodes and continue.
1938   __ bind(Lfound);
1939   if (ProfileInterpreter) {
    // Calc the num of the pair we hit. Rcurrent_pair still points at the
    // matching pair; subtract the table base plus the 2-int header (default
    // offset and count) to get the pair's byte offset, then divide by the
    // pair size.
1942     __ sub(Rcurrent_pair, Rcurrent_pair, Rdef_offset_addr);
1943     __ addi(Rcurrent_pair, Rcurrent_pair, - 2 * BytesPerInt);
1944     __ srdi(Rcurrent_pair, Rcurrent_pair, LogBytesPerInt + 1);
1945     __ profile_switch_case(Rcurrent_pair, Rcount /*scratch*/, Rdef_offset_addr/*scratch*/, Rscratch);
1946     __ bind(Lcontinue_execution);
1947   }
1948   __ add(R14_bcp, Roffset, R14_bcp);
1949   __ dispatch_next(vtos);
1950 }
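
// C sketch of the search above (illustrative only; align_down is a
// hypothetical helper):
//
//   int32_t* table = align_down(bcp + BytesPerInt, BytesPerInt);
//   int32_t off = table[0];                         // Default offset.
//   int32_t npairs = table[1];
//   for (int i = 0; i < npairs; i++) {
//     if (table[2 + 2 * i] == key) { off = table[3 + 2 * i]; break; }
//   }
//   bcp += off;  // Values/offsets are big-endian in the bytecode stream.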
1951 
// Lookup switch implemented as a binary search (value/offset pairs are ordered).
1953 // Bytecode stream format:
1954 // Bytecode (1) | 4-byte padding | default offset (4) | count (4) | value/offset pair1 (8) | value/offset pair2 (8) | ...
// Note: Everything is big-endian format here. So on little-endian machines, we
// have to byte-reverse the offsets, the count, and the compare values.
1956 void TemplateTable::fast_binaryswitch() {
1957 
1958   transition(itos, vtos);
1959   // Implementation using the following core algorithm: (copied from Intel)
1960   //
1961   // int binary_search(int key, LookupswitchPair* array, int n) {
1962   //   // Binary search according to "Methodik des Programmierens" by
1963   //   // Edsger W. Dijkstra and W.H.J. Feijen, Addison Wesley Germany 1985.
1964   //   int i = 0;
1965   //   int j = n;
1966   //   while (i+1 < j) {
1967   //     // invariant P: 0 <= i < j <= n and (a[i] <= key < a[j] or Q)
1968   //     // with      Q: for all i: 0 <= i < n: key < a[i]
1969   //     // where a stands for the array and assuming that the (inexisting)
1970   //     // element a[n] is infinitely big.
1971   //     int h = (i + j) >> 1;
1972   //     // i < h < j
1973   //     if (key < array[h].fast_match()) {
1974   //       j = h;
1975   //     } else {
1976   //       i = h;
1977   //     }
1978   //   }
1979   //   // R: a[i] <= key < a[i+1] or Q
1980   //   // (i.e., if key is within array, i is the correct index)
1981   //   return i;
1982   // }
1983 
1984   // register allocation
1985   const Register Rkey     = R17_tos;          // already set (tosca)
1986   const Register Rarray   = R3_ARG1;
1987   const Register Ri       = R4_ARG2;
1988   const Register Rj       = R5_ARG3;
1989   const Register Rh       = R6_ARG4;
1990   const Register Rscratch = R11_scratch1;
1991 
1992   const int log_entry_size = 3;
1993   const int entry_size = 1 << log_entry_size;
1994 
1995   Label found;
1996 
  // Find array start.
1998   __ addi(Rarray, R14_bcp, 3 * BytesPerInt);
1999   __ clrrdi(Rarray, Rarray, log2_long((jlong)BytesPerInt));
2000 
2001   // initialize i & j
2002   __ li(Ri,0);
2003   __ get_u4(Rj, Rarray, -BytesPerInt, InterpreterMacroAssembler::Unsigned);
2004 
2005   // and start.
2006   Label entry;
2007   __ b(entry);
2008 
2009   // binary search loop
2010   { Label loop;
2011     __ bind(loop);
2012     // int h = (i + j) >> 1;
2013     __ srdi(Rh, Rh, 1);
2014     // if (key < array[h].fast_match()) {
2015     //   j = h;
2016     // } else {
2017     //   i = h;
2018     // }
2019     __ sldi(Rscratch, Rh, log_entry_size);
2020 #if defined(VM_LITTLE_ENDIAN)
2021     __ lwbrx(Rscratch, Rscratch, Rarray);
2022 #else
2023     __ lwzx(Rscratch, Rscratch, Rarray);
2024 #endif
2025 
2026     // if (key < current value)
2027     //   Rh = Rj
2028     // else
2029     //   Rh = Ri
2030     Label Lgreater;
2031     __ cmpw(CCR0, Rkey, Rscratch);
2032     __ bge(CCR0, Lgreater);
2033     __ mr(Rj, Rh);
2034     __ b(entry);
2035     __ bind(Lgreater);
2036     __ mr(Ri, Rh);
2037 
2038     // while (i+1 < j)
2039     __ bind(entry);
2040     __ addi(Rscratch, Ri, 1);
2041     __ cmpw(CCR0, Rscratch, Rj);
    __ add(Rh, Ri, Rj); // Start h = (i + j) >> 1; the shift happens at the loop head.
2043 
2044     __ blt(CCR0, loop);
2045   }
2046 
2047   // End of binary search, result index is i (must check again!).
2048   Label default_case;
2049   Label continue_execution;
2050   if (ProfileInterpreter) {
2051     __ mr(Rh, Ri);              // Save index in i for profiling.
2052   }
2053   // Ri = value offset
2054   __ sldi(Ri, Ri, log_entry_size);
2055   __ add(Ri, Ri, Rarray);
2056   __ get_u4(Rscratch, Ri, 0, InterpreterMacroAssembler::Unsigned);
2057 
  // Check whether the value at index i actually matches the key.
  __ cmpw(CCR0, Rkey, Rscratch);
  __ beq(CCR0, found);
  // Entry not found -> j = default offset.
2063   __ get_u4(Rj, Rarray, -2 * BytesPerInt, InterpreterMacroAssembler::Unsigned);
2064   __ b(default_case);
2065 
  __ bind(found);
  // Entry found -> j = matching offset.
2068   __ profile_switch_case(Rh, Rj, Rscratch, Rkey);
2069   __ get_u4(Rj, Ri, BytesPerInt, InterpreterMacroAssembler::Unsigned);
2070 
2071   if (ProfileInterpreter) {
2072     __ b(continue_execution);
2073   }
2074 
2075   __ bind(default_case); // fall through (if not profiling)
2076   __ profile_switch_default(Ri, Rscratch);
2077 
2078   __ bind(continue_execution);
2079 
2080   __ extsw(Rj, Rj);
2081   __ add(R14_bcp, Rj, R14_bcp);
2082   __ dispatch_next(vtos);
2083 }
2084 
2085 void TemplateTable::_return(TosState state) {
2086   transition(state, state);
2087   assert(_desc->calls_vm(),
2088          "inconsistent calls_vm information"); // call in remove_activation
2089 
2090   if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
2091 
2092     Register Rscratch     = R11_scratch1,
2093              Rklass       = R12_scratch2,
2094              Rklass_flags = Rklass;
2095     Label Lskip_register_finalizer;
2096 
2097     // Check if the method has the FINALIZER flag set and call into the VM to finalize in this case.
2098     assert(state == vtos, "only valid state");
2099     __ ld(R17_tos, 0, R18_locals);
2100 
2101     // Load klass of this obj.
2102     __ load_klass(Rklass, R17_tos);
2103     __ lwz(Rklass_flags, in_bytes(Klass::access_flags_offset()), Rklass);
2104     __ testbitdi(CCR0, R0, Rklass_flags, exact_log2(JVM_ACC_HAS_FINALIZER));
2105     __ bfalse(CCR0, Lskip_register_finalizer);
2106 
2107     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::register_finalizer), R17_tos /* obj */);
2108 
2109     __ align(32, 12);
2110     __ bind(Lskip_register_finalizer);
2111   }
2112 
  // Move the result value into the correct register and remove the memory stack frame.
2114   __ remove_activation(state, /* throw_monitor_exception */ true);
2115   // Restoration of lr done by remove_activation.
2116   switch (state) {
2117     case ltos:
2118     case btos:
2119     case ctos:
2120     case stos:
2121     case atos:
2122     case itos: __ mr(R3_RET, R17_tos); break;
2123     case ftos:
2124     case dtos: __ fmr(F1_RET, F15_ftos); break;
2125     case vtos: // This might be a constructor. Final fields (and volatile fields on PPC64) need
2126                // to get visible before the reference to the object gets stored anywhere.
2127                __ membar(Assembler::StoreStore); break;
2128     default  : ShouldNotReachHere();
2129   }
2130   __ blr();
2131 }
2132 
2133 // ============================================================================
2134 // Constant pool cache access
2135 //
2136 // Memory ordering:
2137 //
// As done in the C++ interpreter, we load the fields
//   - _indices
//   - _f12_oop
// with acquire semantics, because they are checked to decide whether the cache
// entry is already resolved. We don't want loads to float above this check.
2143 // See also comments in ConstantPoolCacheEntry::bytecode_1(),
2144 // ConstantPoolCacheEntry::bytecode_2() and ConstantPoolCacheEntry::f1();
2145 
2146 // Call into the VM if call site is not yet resolved
2147 //
2148 // Input regs:
2149 //   - None, all passed regs are outputs.
2150 //
2151 // Returns:
//   - Rcache: The constant pool cache entry that contains the resolved result.
2154 //
2155 // Kills:
2156 //   - Rscratch
2157 void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register Rscratch, size_t index_size) {
2158 
2159   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2160   Label Lresolved, Ldone;
2161 
2162   assert(byte_no == f1_byte || byte_no == f2_byte, "byte_no out of range");
  // The entry is resolved if its indices field contains the current bytecode.
2164 #if defined(VM_LITTLE_ENDIAN)
2165   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + byte_no + 1, Rcache);
2166 #else
2167   __ lbz(Rscratch, in_bytes(ConstantPoolCache::base_offset() + ConstantPoolCacheEntry::indices_offset()) + 7 - (byte_no + 1), Rcache);
2168 #endif
2169   // Acquire by cmp-br-isync (see below).
2170   __ cmpdi(CCR0, Rscratch, (int)bytecode());
2171   __ beq(CCR0, Lresolved);
2172 
2173   address entry = NULL;
2174   switch (bytecode()) {
2175     case Bytecodes::_getstatic      : // fall through
2176     case Bytecodes::_putstatic      : // fall through
2177     case Bytecodes::_getfield       : // fall through
2178     case Bytecodes::_putfield       : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_get_put); break;
2179     case Bytecodes::_invokevirtual  : // fall through
2180     case Bytecodes::_invokespecial  : // fall through
2181     case Bytecodes::_invokestatic   : // fall through
2182     case Bytecodes::_invokeinterface: entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invoke); break;
2183     case Bytecodes::_invokehandle   : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokehandle); break;
2184     case Bytecodes::_invokedynamic  : entry = CAST_FROM_FN_PTR(address, InterpreterRuntime::resolve_invokedynamic); break;
2185     default                         : ShouldNotReachHere(); break;
2186   }
2187   __ li(R4_ARG2, (int)bytecode());
2188   __ call_VM(noreg, entry, R4_ARG2, true);
2189 
2190   // Update registers with resolved info.
2191   __ get_cache_and_index_at_bcp(Rcache, 1, index_size);
2192   __ b(Ldone);
2193 
2194   __ bind(Lresolved);
2195   __ isync(); // Order load wrt. succeeding loads.
2196   __ bind(Ldone);
2197 }
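
// Logically, resolve_cache_and_index does the following (C sketch,
// illustrative only; indices_byte is a hypothetical accessor):
//
//   if (cache_entry->indices_byte(byte_no) == bytecode()) {
//     // Already resolved. The cmp-br-isync sequence gives the byte load
//     // acquire semantics, so later loads from the entry cannot float
//     // above this check.
//   } else {
//     InterpreterRuntime::resolve_*(bytecode()); // Fill in the entry, then reload it.
//   }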
2198 
2199 // Load the constant pool cache entry at field accesses into registers.
// The Rcache and Rindex registers must be set before the call.
2201 // Input:
2202 //   - Rcache, Rindex
2203 // Output:
2204 //   - Robj, Roffset, Rflags
2205 void TemplateTable::load_field_cp_cache_entry(Register Robj,
2206                                               Register Rcache,
2207                                               Register Rindex /* unused on PPC64 */,
2208                                               Register Roffset,
2209                                               Register Rflags,
2210                                               bool is_static = false) {
2211   assert_different_registers(Rcache, Rflags, Roffset);
2212   // assert(Rindex == noreg, "parameter not used on PPC64");
2213 
2214   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2215   __ ld(Rflags, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache);
2216   __ ld(Roffset, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f2_offset()), Rcache);
2217   if (is_static) {
2218     __ ld(Robj, in_bytes(cp_base_offset) + in_bytes(ConstantPoolCacheEntry::f1_offset()), Rcache);
2219     __ ld(Robj, in_bytes(Klass::java_mirror_offset()), Robj);
2220     // Acquire not needed here. Following access has an address dependency on this value.
2221   }
2222 }
2223 
2224 // Load the constant pool cache entry at invokes into registers.
2225 // Resolve if necessary.
2226 
2227 // Input Registers:
2228 //   - None, bcp is used, though
2229 //
2230 // Return registers:
2231 //   - Rmethod       (f1 field or f2 if invokevirtual)
2232 //   - Ritable_index (f2 field)
2233 //   - Rflags        (flags field)
2234 //
2235 // Kills:
2236 //   - R21
2237 //
2238 void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
2239                                                Register Rmethod,
2240                                                Register Ritable_index,
2241                                                Register Rflags,
2242                                                bool is_invokevirtual,
2243                                                bool is_invokevfinal,
2244                                                bool is_invokedynamic) {
2245 
2246   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2247   // Determine constant pool cache field offsets.
2248   assert(is_invokevirtual == (byte_no == f2_byte), "is_invokevirtual flag redundant");
2249   const int method_offset = in_bytes(cp_base_offset + (is_invokevirtual ? ConstantPoolCacheEntry::f2_offset() : ConstantPoolCacheEntry::f1_offset()));
2250   const int flags_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset());
2251   // Access constant pool cache fields.
2252   const int index_offset  = in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset());
2253 
2254   Register Rcache = R21_tmp1; // Note: same register as R21_sender_SP.
2255 
2256   if (is_invokevfinal) {
2257     assert(Ritable_index == noreg, "register not used");
2258     // Already resolved.
2259     __ get_cache_and_index_at_bcp(Rcache, 1);
2260   } else {
2261     resolve_cache_and_index(byte_no, Rcache, R0, is_invokedynamic ? sizeof(u4) : sizeof(u2));
2262   }
2263 
2264   __ ld(Rmethod, method_offset, Rcache);
2265   __ ld(Rflags, flags_offset, Rcache);
2266 
2267   if (Ritable_index != noreg) {
2268     __ ld(Ritable_index, index_offset, Rcache);
2269   }
2270 }
2271 
2272 // ============================================================================
2273 // Field access
2274 
// Volatile variables demand their effects be made known to all CPUs
2276 // in order. Store buffers on most chips allow reads & writes to
2277 // reorder; the JMM's ReadAfterWrite.java test fails in -Xint mode
2278 // without some kind of memory barrier (i.e., it's not sufficient that
2279 // the interpreter does not reorder volatile references, the hardware
2280 // also must not reorder them).
2281 //
2282 // According to the new Java Memory Model (JMM):
// (1) All volatiles are serialized wrt each other. ALSO reads &
//     writes act as acquire & release, so:
2285 // (2) A read cannot let unrelated NON-volatile memory refs that
2286 //     happen after the read float up to before the read. It's OK for
2287 //     non-volatile memory refs that happen before the volatile read to
2288 //     float down below it.
// (3) Similarly, a volatile write cannot let unrelated NON-volatile
2290 //     memory refs that happen BEFORE the write float down to after the
2291 //     write. It's OK for non-volatile memory refs that happen after the
2292 //     volatile write to float up before it.
2293 //
2294 // We only put in barriers around volatile refs (they are expensive),
2295 // not _between_ memory refs (that would require us to track the
2296 // flavor of the previous memory refs). Requirements (2) and (3)
2297 // require some barriers before volatile stores and after volatile
2298 // loads. These nearly cover requirement (1) but miss the
2299 // volatile-store-volatile-load case.  This final case is placed after
2300 // volatile-stores although it could just as well go before
2301 // volatile-loads.
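
// On PPC64 the rules above yield the following instruction-level patterns
// (illustrative sketch; see getfield_or_static and putfield_or_static below):
//
//   volatile load:   [sync]  load  twi  isync  // Leading sync only on CPUs that
//                                              // need IRIW handling.
//   volatile store:  lwsync  store  [sync]     // Trailing sync unless the IRIW
//                                              // handling moved it to the loads.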
2302 
// The cache and index registers are expected to be set before the call.
2304 // Correct values of the cache and index registers are preserved.
2305 // Kills:
2306 //   Rcache (if has_tos)
2307 //   Rscratch
2308 void TemplateTable::jvmti_post_field_access(Register Rcache, Register Rscratch, bool is_static, bool has_tos) {
2309 
2310   assert_different_registers(Rcache, Rscratch);
2311 
2312   if (JvmtiExport::can_post_field_access()) {
2313     ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2314     Label Lno_field_access_post;
2315 
    // Check if post field access is enabled.
2317     int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_access_count_addr(), R0, true);
2318     __ lwz(Rscratch, offs, Rscratch);
2319 
2320     __ cmpwi(CCR0, Rscratch, 0);
2321     __ beq(CCR0, Lno_field_access_post);
2322 
2323     // Post access enabled - do it!
2324     __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2325     if (is_static) {
2326       __ li(R17_tos, 0);
2327     } else {
2328       if (has_tos) {
2329         // The fast bytecode versions have obj ptr in register.
        // Thus, save the object pointer before the call_VM() clobbers it
        // and put the object on tos where GC wants it.
2332         __ push_ptr(R17_tos);
2333       } else {
2334         // Load top of stack (do not pop the value off the stack).
2335         __ ld(R17_tos, Interpreter::expr_offset_in_bytes(0), R15_esp);
2336       }
2337       __ verify_oop(R17_tos);
2338     }
2339     // tos:   object pointer or NULL if static
2340     // cache: cache entry pointer
2341     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_access), R17_tos, Rcache);
2342     if (!is_static && has_tos) {
2343       // Restore object pointer.
2344       __ pop_ptr(R17_tos);
2345       __ verify_oop(R17_tos);
2346     } else {
2347       // Cache is still needed to get class or obj.
2348       __ get_cache_and_index_at_bcp(Rcache, 1);
2349     }
2350 
2351     __ align(32, 12);
2352     __ bind(Lno_field_access_post);
2353   }
2354 }
2355 
2356 // kills R11_scratch1
2357 void TemplateTable::pop_and_check_object(Register Roop) {
2358   Register Rtmp = R11_scratch1;
2359 
2360   assert_different_registers(Rtmp, Roop);
2361   __ pop_ptr(Roop);
2362   // For field access must check obj.
2363   __ null_check_throw(Roop, -1, Rtmp);
2364   __ verify_oop(Roop);
2365 }
2366 
// PPC64: implement volatile loads as fence-load-acquire.
2368 void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
2369   transition(vtos, vtos);
2370 
2371   Label Lacquire, Lisync;
2372 
2373   const Register Rcache        = R3_ARG1,
2374                  Rclass_or_obj = R22_tmp2,
2375                  Roffset       = R23_tmp3,
2376                  Rflags        = R31,
2377                  Rbtable       = R5_ARG3,
2378                  Rbc           = R6_ARG4,
2379                  Rscratch      = R12_scratch2;
2380 
2381   static address field_branch_table[number_of_states],
2382                  static_branch_table[number_of_states];
2383 
2384   address* branch_table = is_static ? static_branch_table : field_branch_table;
2385 
2386   // Get field offset.
2387   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2388 
2389   // JVMTI support
2390   jvmti_post_field_access(Rcache, Rscratch, is_static, false);
2391 
2392   // Load after possible GC.
2393   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2394 
2395   // Load pointer to branch table.
2396   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2397 
2398   // Get volatile flag.
2399   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2400   // Note: sync is needed before volatile load on PPC64.
2401 
2402   // Check field type.
2403   __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2404 
2405 #ifdef ASSERT
2406   Label LFlagInvalid;
2407   __ cmpldi(CCR0, Rflags, number_of_states);
2408   __ bge(CCR0, LFlagInvalid);
2409 #endif
2410 
2411   // Load from branch table and dispatch (volatile case: one instruction ahead).
2412   __ sldi(Rflags, Rflags, LogBytesPerWord);
2413   __ cmpwi(CCR6, Rscratch, 1); // Volatile?
2414   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2415     __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2416   }
2417   __ ldx(Rbtable, Rbtable, Rflags);
2418 
2419   // Get the obj from stack.
2420   if (!is_static) {
2421     pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2422   } else {
2423     __ verify_oop(Rclass_or_obj);
2424   }
2425 
2426   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
2427     __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2428   }
2429   __ mtctr(Rbtable);
2430   __ bctr();
2431 
2432 #ifdef ASSERT
2433   __ bind(LFlagInvalid);
2434   __ stop("got invalid flag", 0x654);
2435 
2436   // __ bind(Lvtos);
2437   address pc_before_fence = __ pc();
2438   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2439   assert(__ pc() - pc_before_fence == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2440   assert(branch_table[vtos] == 0, "can't compute twice");
2441   branch_table[vtos] = __ pc(); // non-volatile_entry point
2442   __ stop("vtos unexpected", 0x655);
2443 #endif
2444 
2445   __ align(32, 28, 28); // Align load.
2446   // __ bind(Ldtos);
2447   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2448   assert(branch_table[dtos] == 0, "can't compute twice");
2449   branch_table[dtos] = __ pc(); // non-volatile_entry point
2450   __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
2451   __ push(dtos);
2452   if (!is_static) patch_bytecode(Bytecodes::_fast_dgetfield, Rbc, Rscratch);
2453   {
2454     Label acquire_double;
2455     __ beq(CCR6, acquire_double); // Volatile?
2456     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2457 
2458     __ bind(acquire_double);
2459     __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2460     __ beq_predict_taken(CCR0, Lisync);
    __ b(Lisync); // In case of NaN.
2462   }
2463 
2464   __ align(32, 28, 28); // Align load.
2465   // __ bind(Lftos);
2466   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2467   assert(branch_table[ftos] == 0, "can't compute twice");
2468   branch_table[ftos] = __ pc(); // non-volatile_entry point
2469   __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
2470   __ push(ftos);
2471   if (!is_static) { patch_bytecode(Bytecodes::_fast_fgetfield, Rbc, Rscratch); }
2472   {
2473     Label acquire_float;
2474     __ beq(CCR6, acquire_float); // Volatile?
2475     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2476 
2477     __ bind(acquire_float);
2478     __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
2479     __ beq_predict_taken(CCR0, Lisync);
2480     __ b(Lisync); // In case of NAN.
    __ b(Lisync); // In case of NaN.
2482 
2483   __ align(32, 28, 28); // Align load.
2484   // __ bind(Litos);
2485   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2486   assert(branch_table[itos] == 0, "can't compute twice");
2487   branch_table[itos] = __ pc(); // non-volatile_entry point
2488   __ lwax(R17_tos, Rclass_or_obj, Roffset);
2489   __ push(itos);
2490   if (!is_static) patch_bytecode(Bytecodes::_fast_igetfield, Rbc, Rscratch);
2491   __ beq(CCR6, Lacquire); // Volatile?
2492   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2493 
2494   __ align(32, 28, 28); // Align load.
2495   // __ bind(Lltos);
2496   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2497   assert(branch_table[ltos] == 0, "can't compute twice");
2498   branch_table[ltos] = __ pc(); // non-volatile_entry point
2499   __ ldx(R17_tos, Rclass_or_obj, Roffset);
2500   __ push(ltos);
2501   if (!is_static) patch_bytecode(Bytecodes::_fast_lgetfield, Rbc, Rscratch);
2502   __ beq(CCR6, Lacquire); // Volatile?
2503   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2504 
2505   __ align(32, 28, 28); // Align load.
2506   // __ bind(Lbtos);
2507   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2508   assert(branch_table[btos] == 0, "can't compute twice");
2509   branch_table[btos] = __ pc(); // non-volatile_entry point
2510   __ lbzx(R17_tos, Rclass_or_obj, Roffset);
2511   __ extsb(R17_tos, R17_tos);
2512   __ push(btos);
2513   if (!is_static) patch_bytecode(Bytecodes::_fast_bgetfield, Rbc, Rscratch);
2514   __ beq(CCR6, Lacquire); // Volatile?
2515   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2516 
2517   __ align(32, 28, 28); // Align load.
2518   // __ bind(Lctos);
2519   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2520   assert(branch_table[ctos] == 0, "can't compute twice");
2521   branch_table[ctos] = __ pc(); // non-volatile_entry point
2522   __ lhzx(R17_tos, Rclass_or_obj, Roffset);
2523   __ push(ctos);
2524   if (!is_static) patch_bytecode(Bytecodes::_fast_cgetfield, Rbc, Rscratch);
2525   __ beq(CCR6, Lacquire); // Volatile?
2526   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2527 
2528   __ align(32, 28, 28); // Align load.
2529   // __ bind(Lstos);
2530   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2531   assert(branch_table[stos] == 0, "can't compute twice");
2532   branch_table[stos] = __ pc(); // non-volatile_entry point
2533   __ lhax(R17_tos, Rclass_or_obj, Roffset);
2534   __ push(stos);
2535   if (!is_static) patch_bytecode(Bytecodes::_fast_sgetfield, Rbc, Rscratch);
2536   __ beq(CCR6, Lacquire); // Volatile?
2537   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2538 
2539   __ align(32, 28, 28); // Align load.
2540   // __ bind(Latos);
2541   __ fence(); // Volatile entry point (one instruction before non-volatile_entry point).
2542   assert(branch_table[atos] == 0, "can't compute twice");
2543   branch_table[atos] = __ pc(); // non-volatile_entry point
2544   __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2545   __ verify_oop(R17_tos);
2546   __ push(atos);
2547   //__ dcbt(R17_tos); // prefetch
2548   if (!is_static) patch_bytecode(Bytecodes::_fast_agetfield, Rbc, Rscratch);
2549   __ beq(CCR6, Lacquire); // Volatile?
2550   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2551 
2552   __ align(32, 12);
2553   __ bind(Lacquire);
2554   __ twi_0(R17_tos);
2555   __ bind(Lisync);
2556   __ isync(); // acquire
2557 
2558 #ifdef ASSERT
2559   for (int i = 0; i<number_of_states; ++i) {
2560     assert(branch_table[i], "get initialization");
2561     //tty->print_cr("get: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2562     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2563   }
2564 #endif
2565 }
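
// Dispatch pattern used by getfield_or_static above, as a C sketch
// (illustrative only):
//
//   address entry = branch_table[tos_state]; // Non-volatile entry point.
//   if (is_volatile && support_IRIW_for_not_multiple_copy_atomic_cpu) {
//     entry -= BytesPerInstWord;             // Back up onto the leading fence.
//   }
//   goto *entry;                             // mtctr/bctr
//
// Volatile loads additionally take the twi-isync acquire at Lacquire/Lisync
// after the load, selected via the CCR6 compare set up before the dispatch.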
2566 
2567 void TemplateTable::getfield(int byte_no) {
2568   getfield_or_static(byte_no, false);
2569 }
2570 
2571 void TemplateTable::getstatic(int byte_no) {
2572   getfield_or_static(byte_no, true);
2573 }
2574 
// The cache and index registers are expected to be set before the call.
2576 // The function may destroy various registers, just not the cache and index registers.
2577 void TemplateTable::jvmti_post_field_mod(Register Rcache, Register Rscratch, bool is_static) {
2578 
2579   assert_different_registers(Rcache, Rscratch, R6_ARG4);
2580 
2581   if (JvmtiExport::can_post_field_modification()) {
2582     Label Lno_field_mod_post;
2583 
    // Check if post field modification is enabled.
2585     int offs = __ load_const_optimized(Rscratch, JvmtiExport::get_field_modification_count_addr(), R0, true);
2586     __ lwz(Rscratch, offs, Rscratch);
2587 
2588     __ cmpwi(CCR0, Rscratch, 0);
2589     __ beq(CCR0, Lno_field_mod_post);
2590 
2591     // Do the post
2592     ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2593     const Register Robj = Rscratch;
2594 
2595     __ addi(Rcache, Rcache, in_bytes(cp_base_offset));
2596     if (is_static) {
2597       // Life is simple. Null out the object pointer.
2598       __ li(Robj, 0);
2599     } else {
2600       // In case of the fast versions, value lives in registers => put it back on tos.
2601       int offs = Interpreter::expr_offset_in_bytes(0);
2602       Register base = R15_esp;
2603       switch(bytecode()) {
2604         case Bytecodes::_fast_aputfield: __ push_ptr(); offs+= Interpreter::stackElementSize; break;
2605         case Bytecodes::_fast_iputfield: // Fall through
2606         case Bytecodes::_fast_bputfield: // Fall through
2607         case Bytecodes::_fast_cputfield: // Fall through
2608         case Bytecodes::_fast_sputfield: __ push_i(); offs+=  Interpreter::stackElementSize; break;
2609         case Bytecodes::_fast_lputfield: __ push_l(); offs+=2*Interpreter::stackElementSize; break;
2610         case Bytecodes::_fast_fputfield: __ push_f(); offs+=  Interpreter::stackElementSize; break;
2611         case Bytecodes::_fast_dputfield: __ push_d(); offs+=2*Interpreter::stackElementSize; break;
2612         default: {
2613           offs = 0;
2614           base = Robj;
2615           const Register Rflags = Robj;
2616           Label is_one_slot;
2617           // Life is harder. The stack holds the value on top, followed by the
2618           // object. We don't know the size of the value, though; it could be
2619           // one or two words depending on its type. As a result, we must find
2620           // the type to determine where the object is.
2621           __ ld(Rflags, in_bytes(ConstantPoolCacheEntry::flags_offset()), Rcache); // Big Endian
2622           __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2623 
2624           __ cmpwi(CCR0, Rflags, ltos);
2625           __ cmpwi(CCR1, Rflags, dtos);
2626           __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(1));
2627           __ crnor(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2);
2628           __ beq(CCR0, is_one_slot);
2629           __ addi(base, R15_esp, Interpreter::expr_offset_in_bytes(2));
2630           __ bind(is_one_slot);
2631           break;
2632         }
2633       }
2634       __ ld(Robj, offs, base);
2635       __ verify_oop(Robj);
2636     }
2637 
2638     __ addi(R6_ARG4, R15_esp, Interpreter::expr_offset_in_bytes(0));
2639     __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), Robj, Rcache, R6_ARG4);
2640     __ get_cache_and_index_at_bcp(Rcache, 1);
2641 
2642     // In case of the fast versions, value lives in registers => put it back on tos.
2643     switch(bytecode()) {
2644       case Bytecodes::_fast_aputfield: __ pop_ptr(); break;
2645       case Bytecodes::_fast_iputfield: // Fall through
2646       case Bytecodes::_fast_bputfield: // Fall through
2647       case Bytecodes::_fast_cputfield: // Fall through
2648       case Bytecodes::_fast_sputfield: __ pop_i(); break;
2649       case Bytecodes::_fast_lputfield: __ pop_l(); break;
2650       case Bytecodes::_fast_fputfield: __ pop_f(); break;
2651       case Bytecodes::_fast_dputfield: __ pop_d(); break;
2652       default: break; // Nothin' to do.
2653     }
2654 
2655     __ align(32, 12);
2656     __ bind(Lno_field_mod_post);
2657   }
2658 }
2659 
2660 // PPC64: implement volatile stores as release-store (return bytecode contains an additional release).
2661 void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
2662   Label Lvolatile;
2663 
2664   const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2665                  Rclass_or_obj = R31,      // Needs to survive C call.
2666                  Roffset       = R22_tmp2, // Needs to survive C call.
2667                  Rflags        = R3_ARG1,
2668                  Rbtable       = R4_ARG2,
2669                  Rscratch      = R11_scratch1,
2670                  Rscratch2     = R12_scratch2,
2671                  Rscratch3     = R6_ARG4,
2672                  Rbc           = Rscratch3;
2673   const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2674 
2675   static address field_branch_table[number_of_states],
2676                  static_branch_table[number_of_states];
2677 
2678   address* branch_table = is_static ? static_branch_table : field_branch_table;
2679 
2680   // Stack (grows up):
2681   //  value
2682   //  obj
2683 
2684   // Load the field offset.
2685   resolve_cache_and_index(byte_no, Rcache, Rscratch, sizeof(u2));
2686   jvmti_post_field_mod(Rcache, Rscratch, is_static);
2687   load_field_cp_cache_entry(Rclass_or_obj, Rcache, noreg, Roffset, Rflags, is_static);
2688 
2689   // Load pointer to branch table.
2690   __ load_const_optimized(Rbtable, (address)branch_table, Rscratch);
2691 
2692   // Get volatile flag.
2693   __ rldicl(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2694 
2695   // Check the field type.
2696   __ rldicl(Rflags, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
2697 
2698 #ifdef ASSERT
2699   Label LFlagInvalid;
2700   __ cmpldi(CCR0, Rflags, number_of_states);
2701   __ bge(CCR0, LFlagInvalid);
2702 #endif
2703 
2704   // Load from branch table and dispatch (volatile case: one instruction ahead).
2705   __ sldi(Rflags, Rflags, LogBytesPerWord);
2706   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpwi(CR_is_vol, Rscratch, 1); } // Volatile?
  __ sldi(Rscratch, Rscratch, exact_log2(BytesPerInstWord)); // Volatile ? size of 1 instruction : 0.
2708   __ ldx(Rbtable, Rbtable, Rflags);
2709 
2710   __ subf(Rbtable, Rscratch, Rbtable); // Point to volatile/non-volatile entry point.
2711   __ mtctr(Rbtable);
2712   __ bctr();
2713 
2714 #ifdef ASSERT
2715   __ bind(LFlagInvalid);
2716   __ stop("got invalid flag", 0x656);
2717 
2718   // __ bind(Lvtos);
2719   address pc_before_release = __ pc();
2720   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2721   assert(__ pc() - pc_before_release == (ptrdiff_t)BytesPerInstWord, "must be single instruction");
2722   assert(branch_table[vtos] == 0, "can't compute twice");
2723   branch_table[vtos] = __ pc(); // non-volatile_entry point
2724   __ stop("vtos unexpected", 0x657);
2725 #endif
2726 
2727   __ align(32, 28, 28); // Align pop.
2728   // __ bind(Ldtos);
2729   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2730   assert(branch_table[dtos] == 0, "can't compute twice");
2731   branch_table[dtos] = __ pc(); // non-volatile_entry point
2732   __ pop(dtos);
2733   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2734   __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2735   if (!is_static) { patch_bytecode(Bytecodes::_fast_dputfield, Rbc, Rscratch, true, byte_no); }
2736   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2737     __ beq(CR_is_vol, Lvolatile); // Volatile?
2738   }
2739   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2740 
2741   __ align(32, 28, 28); // Align pop.
2742   // __ bind(Lftos);
2743   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2744   assert(branch_table[ftos] == 0, "can't compute twice");
2745   branch_table[ftos] = __ pc(); // non-volatile_entry point
2746   __ pop(ftos);
2747   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2748   __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2749   if (!is_static) { patch_bytecode(Bytecodes::_fast_fputfield, Rbc, Rscratch, true, byte_no); }
2750   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2751     __ beq(CR_is_vol, Lvolatile); // Volatile?
2752   }
2753   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2754 
2755   __ align(32, 28, 28); // Align pop.
2756   // __ bind(Litos);
2757   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2758   assert(branch_table[itos] == 0, "can't compute twice");
2759   branch_table[itos] = __ pc(); // non-volatile_entry point
2760   __ pop(itos);
2761   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2762   __ stwx(R17_tos, Rclass_or_obj, Roffset);
2763   if (!is_static) { patch_bytecode(Bytecodes::_fast_iputfield, Rbc, Rscratch, true, byte_no); }
2764   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2765     __ beq(CR_is_vol, Lvolatile); // Volatile?
2766   }
2767   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2768 
2769   __ align(32, 28, 28); // Align pop.
2770   // __ bind(Lltos);
2771   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2772   assert(branch_table[ltos] == 0, "can't compute twice");
2773   branch_table[ltos] = __ pc(); // non-volatile_entry point
2774   __ pop(ltos);
2775   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2776   __ stdx(R17_tos, Rclass_or_obj, Roffset);
2777   if (!is_static) { patch_bytecode(Bytecodes::_fast_lputfield, Rbc, Rscratch, true, byte_no); }
2778   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2779     __ beq(CR_is_vol, Lvolatile); // Volatile?
2780   }
2781   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2782 
2783   __ align(32, 28, 28); // Align pop.
2784   // __ bind(Lbtos);
2785   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2786   assert(branch_table[btos] == 0, "can't compute twice");
2787   branch_table[btos] = __ pc(); // non-volatile_entry point
2788   __ pop(btos);
2789   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2790   __ stbx(R17_tos, Rclass_or_obj, Roffset);
2791   if (!is_static) { patch_bytecode(Bytecodes::_fast_bputfield, Rbc, Rscratch, true, byte_no); }
2792   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2793     __ beq(CR_is_vol, Lvolatile); // Volatile?
2794   }
2795   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2796 
2797   __ align(32, 28, 28); // Align pop.
2798   // __ bind(Lctos);
2799   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2800   assert(branch_table[ctos] == 0, "can't compute twice");
2801   branch_table[ctos] = __ pc(); // non-volatile_entry point
2802   __ pop(ctos);
  if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2804   __ sthx(R17_tos, Rclass_or_obj, Roffset);
2805   if (!is_static) { patch_bytecode(Bytecodes::_fast_cputfield, Rbc, Rscratch, true, byte_no); }
2806   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2807     __ beq(CR_is_vol, Lvolatile); // Volatile?
2808   }
2809   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2810 
2811   __ align(32, 28, 28); // Align pop.
2812   // __ bind(Lstos);
2813   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2814   assert(branch_table[stos] == 0, "can't compute twice");
2815   branch_table[stos] = __ pc(); // non-volatile_entry point
2816   __ pop(stos);
2817   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // Kills R11_scratch1.
2818   __ sthx(R17_tos, Rclass_or_obj, Roffset);
2819   if (!is_static) { patch_bytecode(Bytecodes::_fast_sputfield, Rbc, Rscratch, true, byte_no); }
2820   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2821     __ beq(CR_is_vol, Lvolatile); // Volatile?
2822   }
2823   __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2824 
2825   __ align(32, 28, 28); // Align pop.
2826   // __ bind(Latos);
2827   __ release(); // Volatile entry point (one instruction before non-volatile_entry point).
2828   assert(branch_table[atos] == 0, "can't compute twice");
2829   branch_table[atos] = __ pc(); // non-volatile_entry point
2830   __ pop(atos);
2831   if (!is_static) { pop_and_check_object(Rclass_or_obj); } // kills R11_scratch1
2832   do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2833   if (!is_static) { patch_bytecode(Bytecodes::_fast_aputfield, Rbc, Rscratch, true, byte_no); }
2834   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2835     __ beq(CR_is_vol, Lvolatile); // Volatile?
2836     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2837 
2838     __ align(32, 12);
2839     __ bind(Lvolatile);
2840     __ fence();
2841   }
2842   // fallthru: __ b(Lexit);
2843 
2844 #ifdef ASSERT
2845   for (int i = 0; i<number_of_states; ++i) {
2846     assert(branch_table[i], "put initialization");
2847     //tty->print_cr("put: %s_branch_table[%d] = 0x%llx (opcode 0x%llx)",
2848     //              is_static ? "static" : "field", i, branch_table[i], *((unsigned int*)branch_table[i]));
2849   }
2850 #endif
2851 }
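
// Store protocol used by putfield_or_static above, as a C sketch
// (illustrative only):
//
//   if (is_volatile) release();   // lwsync: order preceding accesses before the store.
//   store_field(obj, offset, value);
//   if (is_volatile && !support_IRIW_for_not_multiple_copy_atomic_cpu) {
//     fence();                    // sync: order the store before later volatile loads.
//   }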
2852 
2853 void TemplateTable::putfield(int byte_no) {
2854   putfield_or_static(byte_no, false);
2855 }
2856 
2857 void TemplateTable::putstatic(int byte_no) {
2858   putfield_or_static(byte_no, true);
2859 }
2860 
2861 // See SPARC. On PPC64, we have a different jvmti_post_field_mod which does the job.
2862 void TemplateTable::jvmti_post_fast_field_mod() {
2863   __ should_not_reach_here();
2864 }
2865 
2866 void TemplateTable::fast_storefield(TosState state) {
2867   transition(state, vtos);
2868 
2869   const Register Rcache        = R5_ARG3,  // Do not use ARG1/2 (causes trouble in jvmti_post_field_mod).
2870                  Rclass_or_obj = R31,      // Needs to survive C call.
2871                  Roffset       = R22_tmp2, // Needs to survive C call.
2872                  Rflags        = R3_ARG1,
2873                  Rscratch      = R11_scratch1,
2874                  Rscratch2     = R12_scratch2,
2875                  Rscratch3     = R4_ARG2;
2876   const ConditionRegister CR_is_vol = CCR2; // Non-volatile condition register (survives runtime call in do_oop_store).
2877 
2878   // Constant pool already resolved => Load flags and offset of field.
2879   __ get_cache_and_index_at_bcp(Rcache, 1);
2880   jvmti_post_field_mod(Rcache, Rscratch, false /* not static */);
2881   load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
2882 
2883   // Get the obj and the final store addr.
2884   pop_and_check_object(Rclass_or_obj); // Kills R11_scratch1.
2885 
2886   // Get volatile flag.
2887   __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2888   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) { __ cmpdi(CR_is_vol, Rscratch, 1); }
2889   {
2890     Label LnotVolatile;
2891     __ beq(CCR0, LnotVolatile);
2892     __ release();
2893     __ align(32, 12);
2894     __ bind(LnotVolatile);
2895   }
2896 
2897   // Do the store and fencing.
2898   switch(bytecode()) {
2899     case Bytecodes::_fast_aputfield:
2900       // Store into the field.
2901       do_oop_store(_masm, Rclass_or_obj, Roffset, R17_tos, Rscratch, Rscratch2, Rscratch3, _bs->kind(), false /* precise */, true /* check null */);
2902       break;
2903 
2904     case Bytecodes::_fast_iputfield:
2905       __ stwx(R17_tos, Rclass_or_obj, Roffset);
2906       break;
2907 
2908     case Bytecodes::_fast_lputfield:
2909       __ stdx(R17_tos, Rclass_or_obj, Roffset);
2910       break;
2911 
2912     case Bytecodes::_fast_bputfield:
2913       __ stbx(R17_tos, Rclass_or_obj, Roffset);
2914       break;
2915 
2916     case Bytecodes::_fast_cputfield:
2917     case Bytecodes::_fast_sputfield:
2918       __ sthx(R17_tos, Rclass_or_obj, Roffset);
2919       break;
2920 
2921     case Bytecodes::_fast_fputfield:
2922       __ stfsx(F15_ftos, Rclass_or_obj, Roffset);
2923       break;
2924 
2925     case Bytecodes::_fast_dputfield:
2926       __ stfdx(F15_ftos, Rclass_or_obj, Roffset);
2927       break;
2928 
2929     default: ShouldNotReachHere();
2930   }
2931 
2932   if (!support_IRIW_for_not_multiple_copy_atomic_cpu) {
2933     Label LVolatile;
2934     __ beq(CR_is_vol, LVolatile);
2935     __ dispatch_epilog(vtos, Bytecodes::length_for(bytecode()));
2936 
2937     __ align(32, 12);
2938     __ bind(LVolatile);
2939     __ fence();
2940   }
2941 }
2942 
2943 void TemplateTable::fast_accessfield(TosState state) {
2944   transition(atos, state);
2945 
2946   Label LisVolatile;
2947   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
2948 
2949   const Register Rcache        = R3_ARG1,
2950                  Rclass_or_obj = R17_tos,
2951                  Roffset       = R22_tmp2,
2952                  Rflags        = R23_tmp3,
2953                  Rscratch      = R12_scratch2;
2954 
2955   // Constant pool already resolved. Get the field offset.
2956   __ get_cache_and_index_at_bcp(Rcache, 1);
2957   load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
2958 
2959   // JVMTI support
2960   jvmti_post_field_access(Rcache, Rscratch, false, true);
2961 
2962   // Get the load address.
2963   __ null_check_throw(Rclass_or_obj, -1, Rscratch);
2964 
2965   // Get volatile flag.
2966   __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
2967   __ bne(CCR0, LisVolatile);
2968 
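       // Acquire idiom used in the volatile paths below (sketch): the loaded value
       // feeds an artificial compare-and-branch (twi_0, or fcmpu/bne for floats)
       // followed by isync, which keeps subsequent loads from executing before the
       // volatile load (load-acquire) without the cost of a full sync.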
2969   switch(bytecode()) {
2970     case Bytecodes::_fast_agetfield:
2971     {
2972       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2973       __ verify_oop(R17_tos);
2974       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
2975 
2976       __ bind(LisVolatile);
2977       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
2978       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
2979       __ verify_oop(R17_tos);
2980       __ twi_0(R17_tos);
2981       __ isync();
2982       break;
2983     }
2984     case Bytecodes::_fast_igetfield:
2985     {
2986       __ lwax(R17_tos, Rclass_or_obj, Roffset);
2987       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
2988 
2989       __ bind(LisVolatile);
2990       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
2991       __ lwax(R17_tos, Rclass_or_obj, Roffset);
2992       __ twi_0(R17_tos);
2993       __ isync();
2994       break;
2995     }
2996     case Bytecodes::_fast_lgetfield:
2997     {
2998       __ ldx(R17_tos, Rclass_or_obj, Roffset);
2999       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3000 
3001       __ bind(LisVolatile);
3002       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3003       __ ldx(R17_tos, Rclass_or_obj, Roffset);
3004       __ twi_0(R17_tos);
3005       __ isync();
3006       break;
3007     }
3008     case Bytecodes::_fast_bgetfield:
3009     {
3010       __ lbzx(R17_tos, Rclass_or_obj, Roffset);
3011       __ extsb(R17_tos, R17_tos);
3012       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3013 
3014       __ bind(LisVolatile);
3015       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3016       __ lbzx(R17_tos, Rclass_or_obj, Roffset);
3017       __ twi_0(R17_tos);
3018       __ extsb(R17_tos, R17_tos);
3019       __ isync();
3020       break;
3021     }
3022     case Bytecodes::_fast_cgetfield:
3023     {
3024       __ lhzx(R17_tos, Rclass_or_obj, Roffset);
3025       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3026 
3027       __ bind(LisVolatile);
3028       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3029       __ lhzx(R17_tos, Rclass_or_obj, Roffset);
3030       __ twi_0(R17_tos);
3031       __ isync();
3032       break;
3033     }
3034     case Bytecodes::_fast_sgetfield:
3035     {
3036       __ lhax(R17_tos, Rclass_or_obj, Roffset);
3037       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3038 
3039       __ bind(LisVolatile);
3040       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3041       __ lhax(R17_tos, Rclass_or_obj, Roffset);
3042       __ twi_0(R17_tos);
3043       __ isync();
3044       break;
3045     }
3046     case Bytecodes::_fast_fgetfield:
3047     {
3048       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3049       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3050 
3051       __ bind(LisVolatile);
3052       Label Ldummy;
3053       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3054       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3055       __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3056       __ bne_predict_not_taken(CCR0, Ldummy);
3057       __ bind(Ldummy);
3058       __ isync();
3059       break;
3060     }
3061     case Bytecodes::_fast_dgetfield:
3062     {
3063       __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
3064       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()));
3065 
3066       __ bind(LisVolatile);
3067       Label Ldummy;
3068       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3069       __ lfdx(F15_ftos, Rclass_or_obj, Roffset);
3070       __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3071       __ bne_predict_not_taken(CCR0, Ldummy);
3072       __ bind(Ldummy);
3073       __ isync();
3074       break;
3075     }
3076     default: ShouldNotReachHere();
3077   }
3078 }
3079 
3080 void TemplateTable::fast_xaccess(TosState state) {
3081   transition(vtos, state);
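
       // fast_xaccess implements the rewritten "aload_0; getfield" pattern
       // (e.g. _fast_iaccess_0): load local 0 (the receiver) and access the field
       // in a single template.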
3082 
3083   Label LisVolatile;
3084   ByteSize cp_base_offset = ConstantPoolCache::base_offset();
3085   const Register Rcache        = R3_ARG1,
3086                  Rclass_or_obj = R17_tos,
3087                  Roffset       = R22_tmp2,
3088                  Rflags        = R23_tmp3,
3089                  Rscratch      = R12_scratch2;
3090 
3091   __ ld(Rclass_or_obj, 0, R18_locals);
3092 
3093   // Constant pool already resolved. Get the field offset.
3094   __ get_cache_and_index_at_bcp(Rcache, 2);
3095   load_field_cp_cache_entry(noreg, Rcache, noreg, Roffset, Rflags, false);
3096 
3097   // JVMTI support not needed, since we switch back to single bytecode as soon as debugger attaches.
3098 
3099   // Needed to report exception at the correct bcp.
3100   __ addi(R14_bcp, R14_bcp, 1);
3101 
3102   // Get the load address.
3103   __ null_check_throw(Rclass_or_obj, -1, Rscratch);
3104 
3105   // Get volatile flag.
3106   __ rldicl_(Rscratch, Rflags, 64-ConstantPoolCacheEntry::is_volatile_shift, 63); // Extract volatile bit.
3107   __ bne(CCR0, LisVolatile);
3108 
3109   switch(state) {
3110   case atos:
3111     {
3112       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
3113       __ verify_oop(R17_tos);
3114       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3115 
3116       __ bind(LisVolatile);
3117       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3118       __ load_heap_oop(R17_tos, (RegisterOrConstant)Roffset, Rclass_or_obj);
3119       __ verify_oop(R17_tos);
3120       __ twi_0(R17_tos);
3121       __ isync();
3122       break;
3123     }
3124   case itos:
3125     {
3126       __ lwax(R17_tos, Rclass_or_obj, Roffset);
3127       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3128 
3129       __ bind(LisVolatile);
3130       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3131       __ lwax(R17_tos, Rclass_or_obj, Roffset);
3132       __ twi_0(R17_tos);
3133       __ isync();
3134       break;
3135     }
3136   case ftos:
3137     {
3138       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3139       __ dispatch_epilog(state, Bytecodes::length_for(bytecode()) - 1); // Undo bcp increment.
3140 
3141       __ bind(LisVolatile);
3142       Label Ldummy;
3143       if (support_IRIW_for_not_multiple_copy_atomic_cpu) { __ fence(); }
3144       __ lfsx(F15_ftos, Rclass_or_obj, Roffset);
3145       __ fcmpu(CCR0, F15_ftos, F15_ftos); // Acquire by cmp-br-isync.
3146       __ bne_predict_not_taken(CCR0, Ldummy);
3147       __ bind(Ldummy);
3148       __ isync();
3149       break;
3150     }
3151   default: ShouldNotReachHere();
3152   }
3153   __ addi(R14_bcp, R14_bcp, -1);
3154 }
3155 
3156 // ============================================================================
3157 // Calls
3158 
3159 // Common code for invoke
3160 //
3161 // Input:
3162 //   - byte_no
3163 //
3164 // Output:
3165 //   - Rmethod:        The method to invoke next.
3166 //   - Rret_addr:      The return address to return to.
3167 //   - Rindex:         MethodType (invokehandle) or CallSite obj (invokedynamic)
3168 //   - Rrecv:          Cache for "this" pointer, might be noreg if static call.
3169 //   - Rflags:         Method flags from const pool cache.
3170 //
3171 //  Kills:
3172 //   - Rscratch1
3173 //
3174 void TemplateTable::prepare_invoke(int byte_no,
3175                                    Register Rmethod,  // linked method (or i-klass)
3176                                    Register Rret_addr,// return address
3177                                    Register Rindex,   // itable index, MethodType, etc.
3178                                    Register Rrecv,    // If caller wants to see it.
3179                                    Register Rflags,   // If caller wants to test it.
3180                                    Register Rscratch
3181                                    ) {
3182   // Determine flags.
3183   const Bytecodes::Code code = bytecode();
3184   const bool is_invokeinterface  = code == Bytecodes::_invokeinterface;
3185   const bool is_invokedynamic    = code == Bytecodes::_invokedynamic;
3186   const bool is_invokehandle     = code == Bytecodes::_invokehandle;
3187   const bool is_invokevirtual    = code == Bytecodes::_invokevirtual;
3188   const bool is_invokespecial    = code == Bytecodes::_invokespecial;
3189   const bool load_receiver       = (Rrecv != noreg);
3190   assert(load_receiver == (code != Bytecodes::_invokestatic && code != Bytecodes::_invokedynamic), "");
3191 
3192   assert_different_registers(Rmethod, Rindex, Rflags, Rscratch);
3193   assert_different_registers(Rmethod, Rrecv, Rflags, Rscratch);
3194   assert_different_registers(Rret_addr, Rscratch);
3195 
3196   load_invoke_cp_cache_entry(byte_no, Rmethod, Rindex, Rflags, is_invokevirtual, false, is_invokedynamic);
3197 
3198   // Saving of SP done in call_from_interpreter.
3199 
3200   // Maybe push "appendix" to arguments.
3201   if (is_invokedynamic || is_invokehandle) {
3202     Label Ldone;
3203     __ rldicl_(R0, Rflags, 64-ConstantPoolCacheEntry::has_appendix_shift, 63);
3204     __ beq(CCR0, Ldone);
3205     // Push "appendix" (MethodType, CallSite, etc.).
3206     // This must be done before we get the receiver,
3207     // since the parameter_size includes it.
3208     __ load_resolved_reference_at_index(Rscratch, Rindex);
3209     __ verify_oop(Rscratch);
3210     __ push_ptr(Rscratch);
3211     __ bind(Ldone);
3212   }
3213 
3214   // Load receiver if needed (after appendix is pushed so parameter size is correct).
3215   if (load_receiver) {
3216     const Register Rparam_count = Rscratch;
3217     __ andi(Rparam_count, Rflags, ConstantPoolCacheEntry::parameter_size_mask);
3218     __ load_receiver(Rparam_count, Rrecv);
3219     __ verify_oop(Rrecv);
3220   }
3221 
3222   // Get return address.
3223   {
3224     Register Rtable_addr = Rscratch;
3225     Register Rret_type = Rret_addr;
3226     address table_addr = (address) Interpreter::invoke_return_entry_table_for(code);
3227 
3228     // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3229     __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3230     __ load_dispatch_table(Rtable_addr, (address*)table_addr);
3231     __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3232     // Get return address.
3233     __ ldx(Rret_addr, Rtable_addr, Rret_type);
3234   }
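
       // At this point Rret_addr == invoke_return_entry_table_for(code)[tos_state],
       // computed above as table_addr + (tos_state << LogBytesPerWord).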
3235 }
3236 
3237 // Helper for virtual calls. Load target out of vtable and jump off!
3238 // Kills all passed registers.
3239 void TemplateTable::generate_vtable_call(Register Rrecv_klass, Register Rindex, Register Rret, Register Rtemp) {
3240 
3241   assert_different_registers(Rrecv_klass, Rtemp, Rret);
3242   const Register Rtarget_method = Rindex;
3243 
3244   // Get target method & entry point.
3245   const int base = InstanceKlass::vtable_start_offset() * wordSize;
3246   // Calc vtable addr: scale the vtable index by the vtable entry size.
3247   __ sldi(Rindex, Rindex, exact_log2(vtableEntry::size() * wordSize));
3248   // Load target.
3249   __ addi(Rrecv_klass, Rrecv_klass, base + vtableEntry::method_offset_in_bytes());
3250   __ ldx(Rtarget_method, Rindex, Rrecv_klass);
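       // The load above computes (sketch, in terms of the incoming values):
       //   Rtarget_method = *(Rrecv_klass + vtable_start_offset * wordSize
       //                      + Rindex * vtable_entry_size + method_offset_in_bytes)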
3251   __ call_from_interpreter(Rtarget_method, Rret, Rrecv_klass /* scratch1 */, Rtemp /* scratch2 */);
3252 }
3253 
3254 // Virtual or final call. Final calls are rewritten on the fly to run through "_fast_invokevfinal" next time.
3255 void TemplateTable::invokevirtual(int byte_no) {
3256   transition(vtos, vtos);
3257 
3258   Register Rtable_addr = R11_scratch1,
3259            Rret_type = R12_scratch2,
3260            Rret_addr = R5_ARG3,
3261            Rflags = R22_tmp2, // Should survive C call.
3262            Rrecv = R3_ARG1,
3263            Rrecv_klass = Rrecv,
3264            Rvtableindex_or_method = R31, // Should survive C call.
3265            Rnum_params = R4_ARG2,
3266            Rnew_bc = R6_ARG4;
3267 
3268   Label LnotFinal;
3269 
3270   load_invoke_cp_cache_entry(byte_no, Rvtableindex_or_method, noreg, Rflags, /*virtual*/ true, false, false);
3271 
3272   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3273   __ bfalse(CCR0, LnotFinal);
3274 
3275   patch_bytecode(Bytecodes::_fast_invokevfinal, Rnew_bc, R12_scratch2);
3276   invokevfinal_helper(Rvtableindex_or_method, Rflags, R11_scratch1, R12_scratch2);
3277 
3278   __ align(32, 12);
3279   __ bind(LnotFinal);
3280   // Load "this" pointer (receiver).
3281   __ rldicl(Rnum_params, Rflags, 64, 48);
3282   __ load_receiver(Rnum_params, Rrecv);
3283   __ verify_oop(Rrecv);
3284 
3285   // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3286   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3287   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3288   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3289   __ ldx(Rret_addr, Rret_type, Rtable_addr);
3290   __ null_check_throw(Rrecv, oopDesc::klass_offset_in_bytes(), R11_scratch1);
3291   __ load_klass(Rrecv_klass, Rrecv);
3292   __ verify_klass_ptr(Rrecv_klass);
3293   __ profile_virtual_call(Rrecv_klass, R11_scratch1, R12_scratch2, false);
3294 
3295   generate_vtable_call(Rrecv_klass, Rvtableindex_or_method, Rret_addr, R11_scratch1);
3296 }
3297 
3298 void TemplateTable::fast_invokevfinal(int byte_no) {
3299   transition(vtos, vtos);
3300 
3301   assert(byte_no == f2_byte, "use this argument");
3302   Register Rflags  = R22_tmp2,
3303            Rmethod = R31;
3304   load_invoke_cp_cache_entry(byte_no, Rmethod, noreg, Rflags, /*virtual*/ true, /*is_invokevfinal*/ true, false);
3305   invokevfinal_helper(Rmethod, Rflags, R11_scratch1, R12_scratch2);
3306 }
3307 
3308 void TemplateTable::invokevfinal_helper(Register Rmethod, Register Rflags, Register Rscratch1, Register Rscratch2) {
3309 
3310   assert_different_registers(Rmethod, Rflags, Rscratch1, Rscratch2);
3311 
3312   // Load receiver from stack slot.
3313   Register Rrecv = Rscratch2;
3314   Register Rnum_params = Rrecv;
3315 
3316   __ ld(Rnum_params, in_bytes(Method::const_offset()), Rmethod);
3317   __ lhz(Rnum_params /* number of params */, in_bytes(ConstMethod::size_of_parameters_offset()), Rnum_params);
3318 
3319   // Get return address.
3320   Register Rtable_addr = Rscratch1,
3321            Rret_addr   = Rflags,
3322            Rret_type   = Rret_addr;
3323   // Get return type. It's coded into the upper 4 bits of the lower half of the 64 bit value.
3324   __ rldicl(Rret_type, Rflags, 64-ConstantPoolCacheEntry::tos_state_shift, 64-ConstantPoolCacheEntry::tos_state_bits);
3325   __ load_dispatch_table(Rtable_addr, Interpreter::invoke_return_entry_table());
3326   __ sldi(Rret_type, Rret_type, LogBytesPerWord);
3327   __ ldx(Rret_addr, Rret_type, Rtable_addr);
3328 
3329   // Load receiver and receiver NULL check.
3330   __ load_receiver(Rnum_params, Rrecv);
3331   __ null_check_throw(Rrecv, -1, Rscratch1);
3332 
3333   __ profile_final_call(Rrecv, Rscratch1);
3334 
3335   // Do the call.
3336   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1, Rscratch2);
3337 }
3338 
3339 void TemplateTable::invokespecial(int byte_no) {
3340   assert(byte_no == f1_byte, "use this argument");
3341   transition(vtos, vtos);
3342 
3343   Register Rtable_addr = R3_ARG1,
3344            Rret_addr   = R4_ARG2,
3345            Rflags      = R5_ARG3,
3346            Rreceiver   = R6_ARG4,
3347            Rmethod     = R31;
3348 
3349   prepare_invoke(byte_no, Rmethod, Rret_addr, noreg, Rreceiver, Rflags, R11_scratch1);
3350 
3351   // Receiver NULL check.
3352   __ null_check_throw(Rreceiver, -1, R11_scratch1);
3353 
3354   __ profile_call(R11_scratch1, R12_scratch2);
3355   __ call_from_interpreter(Rmethod, Rret_addr, R11_scratch1, R12_scratch2);
3356 }
3357 
3358 void TemplateTable::invokestatic(int byte_no) {
3359   assert(byte_no == f1_byte, "use this argument");
3360   transition(vtos, vtos);
3361 
3362   Register Rtable_addr = R3_ARG1,
3363            Rret_addr   = R4_ARG2,
3364            Rflags      = R5_ARG3;
3365 
3366   prepare_invoke(byte_no, R19_method, Rret_addr, noreg, noreg, Rflags, R11_scratch1);
3367 
3368   __ profile_call(R11_scratch1, R12_scratch2);
3369   __ call_from_interpreter(R19_method, Rret_addr, R11_scratch1, R12_scratch2);
3370 }
3371 
3372 void TemplateTable::invokeinterface_object_method(Register Rrecv_klass,
3373                                                   Register Rret,
3374                                                   Register Rflags,
3375                                                   Register Rindex,
3376                                                   Register Rtemp1,
3377                                                   Register Rtemp2) {
3378 
3379   assert_different_registers(Rindex, Rret, Rrecv_klass, Rflags, Rtemp1, Rtemp2);
3380   Label LnotFinal;
3381 
3382   // Check for vfinal.
3383   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_vfinal_shift);
3384   __ bfalse(CCR0, LnotFinal);
3385 
3386   Register Rscratch = Rflags; // Rflags is dead now.
3387 
3388   // Final call case.
3389   __ profile_final_call(Rtemp1, Rscratch);
3390   // Do the final call - the index (f2) contains the method.
3391   __ call_from_interpreter(Rindex, Rret, Rscratch, Rrecv_klass /* scratch */);
3392 
3393   // Non-final call case.
3394   __ bind(LnotFinal);
3395   __ profile_virtual_call(Rrecv_klass, Rtemp1, Rscratch, false);
3396   generate_vtable_call(Rrecv_klass, Rindex, Rret, Rscratch);
3397 }
3398 
3399 void TemplateTable::invokeinterface(int byte_no) {
3400   assert(byte_no == f1_byte, "use this argument");
3401   transition(vtos, vtos);
3402 
3403   const Register Rscratch1        = R11_scratch1,
3404                  Rscratch2        = R12_scratch2,
3405                  Rscratch3        = R9_ARG7,
3406                  Rscratch4        = R10_ARG8,
3407                  Rtable_addr      = Rscratch2,
3408                  Rinterface_klass = R5_ARG3,
3409                  Rret_type        = R8_ARG6,
3410                  Rret_addr        = Rret_type,
3411                  Rindex           = R6_ARG4,
3412                  Rreceiver        = R4_ARG2,
3413                  Rrecv_klass      = Rreceiver,
3414                  Rflags           = R7_ARG5;
3415 
3416   prepare_invoke(byte_no, Rinterface_klass, Rret_addr, Rindex, Rreceiver, Rflags, Rscratch1);
3417 
3418   // Get receiver klass.
3419   __ null_check_throw(Rreceiver, oopDesc::klass_offset_in_bytes(), Rscratch3);
3420   __ load_klass(Rrecv_klass, Rreceiver);
3421 
3422   // Check corner case object method.
3423   Label LobjectMethod;
3424 
3425   __ testbitdi(CCR0, R0, Rflags, ConstantPoolCacheEntry::is_forced_virtual_shift);
3426   __ btrue(CCR0, LobjectMethod);
3427 
3428   // Fallthrough: The normal invokeinterface case.
3429   __ profile_virtual_call(Rrecv_klass, Rscratch1, Rscratch2, false);
3430 
3431   // Find entry point to call.
3432   Label Lthrow_icc, Lthrow_ame;
3433   // Result will be returned in Rindex.
3434   __ mr(Rscratch4, Rrecv_klass);
3435   __ mr(Rscratch3, Rindex);
3436   __ lookup_interface_method(Rrecv_klass, Rinterface_klass, Rindex, Rindex, Rscratch1, Rscratch2, Lthrow_icc);
3437 
3438   __ cmpdi(CCR0, Rindex, 0);
3439   __ beq(CCR0, Lthrow_ame);
3440   // Found entry. Jump off!
3441   __ call_from_interpreter(Rindex, Rret_addr, Rscratch1, Rscratch2);
3442 
3443   // Found entry was NULL => Throw abstract method error.
3444   __ bind(Lthrow_ame);
3445   __ mr(Rrecv_klass, Rscratch4);
3446   __ mr(Rindex, Rscratch3);
3447   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
3448 
3449   // Interface was not found => Throw incompatible class change error.
3450   __ bind(Lthrow_icc);
3451   __ mr(Rrecv_klass, Rscratch4);
3452   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_IncompatibleClassChangeError));
3453 
3454   __ should_not_reach_here();
3455 
3456   // Special case of invokeinterface called for virtual method of
3457   // java.lang.Object. See ConstantPoolCacheEntry::set_method() for details:
3458   // The invokeinterface was rewritten to an invokevirtual, hence we have
3459   // to handle this corner case. This code isn't produced by javac, but could
3460   // be produced by another compliant Java compiler.
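       // Illustration (hypothetical bytecode such a compiler might emit):
       //   invokeinterface java/lang/Object.toString:()Ljava/lang/String;
       // ConstantPoolCacheEntry::set_method() marks such entries is_forced_virtual,
       // so we dispatch them like invokevirtual here.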
3461   __ bind(LobjectMethod);
3462   invokeinterface_object_method(Rrecv_klass, Rret_addr, Rflags, Rindex, Rscratch1, Rscratch2);
3463 }
3464 
3465 void TemplateTable::invokedynamic(int byte_no) {
3466   transition(vtos, vtos);
3467 
3468   const Register Rret_addr = R3_ARG1,
3469                  Rflags    = R4_ARG2,
3470                  Rmethod   = R22_tmp2,
3471                  Rscratch1 = R11_scratch1,
3472                  Rscratch2 = R12_scratch2;
3473 
3474   prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, noreg, Rflags, Rscratch2);
3475 
3476   // Profile this call.
3477   __ profile_call(Rscratch1, Rscratch2);
3478 
3479   // Off we go. With the new method handles, we don't jump to a method handle
3480   // entry any more. Instead, we pushed an "appendix" in prepare_invoke, which happens
3481   // to be the CallSite object the bootstrap method returned. This is passed to a
3482   // "link" method which does the dispatch (most likely just grabs the MH stored
3483   // inside the CallSite and does an invokehandle).
3484   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3485 }
3486 
3487 void TemplateTable::invokehandle(int byte_no) {
3488   transition(vtos, vtos);
3489 
3490   const Register Rret_addr = R3_ARG1,
3491                  Rflags    = R4_ARG2,
3492                  Rrecv     = R5_ARG3,
3493                  Rmethod   = R22_tmp2,
3494                  Rscratch1 = R11_scratch1,
3495                  Rscratch2 = R12_scratch2;
3496 
3497   prepare_invoke(byte_no, Rmethod, Rret_addr, Rscratch1, Rrecv, Rflags, Rscratch2);
3498   __ verify_method_ptr(Rmethod);
3499   __ null_check_throw(Rrecv, -1, Rscratch2);
3500 
3501   __ profile_final_call(Rrecv, Rscratch1);
3502 
3503   // Still no call from handle => We call the method handle interpreter here.
3504   __ call_from_interpreter(Rmethod, Rret_addr, Rscratch1 /* scratch1 */, Rscratch2 /* scratch2 */);
3505 }
3506 
3507 // =============================================================================
3508 // Allocation
3509 
3510 // Puts allocated obj ref onto the expression stack.
3511 void TemplateTable::_new() {
3512   transition(vtos, atos);
3513 
3514   Label Lslow_case,
3515         Ldone,
3516         Linitialize_header,
3517         Lallocate_shared,
3518         Linitialize_object;  // Including clearing the fields.
3519 
3520   const Register RallocatedObject = R17_tos,
3521                  RinstanceKlass   = R9_ARG7,
3522                  Rscratch         = R11_scratch1,
3523                  Roffset          = R8_ARG6,
3524                  Rinstance_size   = Roffset,
3525                  Rcpool           = R4_ARG2,
3526                  Rtags            = R3_ARG1,
3527                  Rindex           = R5_ARG3;
3528 
3529   const bool allow_shared_alloc = Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
3530 
3531   // --------------------------------------------------------------------------
3532   // Check if fast case is possible.
3533 
3534   // Load pointers to const pool and const pool's tags array.
3535   __ get_cpool_and_tags(Rcpool, Rtags);
3536   // Load index of constant pool entry.
3537   __ get_2_byte_integer_at_bcp(1, Rindex, InterpreterMacroAssembler::Unsigned);
3538 
3539   if (UseTLAB) {
3540     // Make sure the class we're about to instantiate has been resolved.
3541     // This is done before loading the InstanceKlass to be consistent with the order
3542     // in which the constant pool is updated (see ConstantPoolCache::klass_at_put).
3543     __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3544     __ lbzx(Rtags, Rindex, Rtags);
3545 
3546     __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3547     __ bne(CCR0, Lslow_case);
3548 
3549     // Get instanceKlass (load from Rcpool + sizeof(ConstantPool) + Rindex*BytesPerWord).
3550     __ sldi(Roffset, Rindex, LogBytesPerWord);
3551     __ addi(Rscratch, Rcpool, sizeof(ConstantPool));
3552     __ isync(); // Order load of instance Klass wrt. tags.
3553     __ ldx(RinstanceKlass, Roffset, Rscratch);
3554 
3555     // Make sure klass is fully initialized and get instance_size.
3556     __ lbz(Rscratch, in_bytes(InstanceKlass::init_state_offset()), RinstanceKlass);
3557     __ lwz(Rinstance_size, in_bytes(Klass::layout_helper_offset()), RinstanceKlass);
3558 
3559     __ cmpdi(CCR1, Rscratch, InstanceKlass::fully_initialized);
3560     // Make sure klass has no finalizer and is neither abstract, nor an interface, nor java/lang/Class.
3561     __ andi_(R0, Rinstance_size, Klass::_lh_instance_slow_path_bit); // slow path bit equals 0?
3562 
3563     __ crnand(/*CR0 eq*/2, /*CR1 eq*/4+2, /*CR0 eq*/2); // slow path bit set or not fully initialized?
3564     __ beq(CCR0, Lslow_case);
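         // CR logic above (sketch): after andi_, CR0.eq == 1 iff the slow-path bit
         // is clear; after cmpdi, CR1.eq == 1 iff the klass is fully initialized.
         // crnand sets CR0.eq = !(CR0.eq & CR1.eq), so the beq takes the slow path
         // iff either condition fails.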
3565 
3566     // --------------------------------------------------------------------------
3567     // Fast case:
3568     // Allocate the instance.
3569     // 1) Try to allocate in the TLAB.
3570     // 2) If that fails and the TLAB is not yet full enough to be discarded, allocate in the shared Eden.
3571     // 3) If the above fails (or is not applicable), go to a slow case (creates a new TLAB, etc.); see the sketch below.
3572 
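         // Bump-pointer TLAB allocation as performed below, in pseudocode
         // (illustrative):
         //   new_top = top + instance_size;
         //   if (new_top <= end) { obj = top; top = new_top; }  // fast path, no CAS
         //   else                { try shared eden, else slow path; }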
3573     Register RoldTopValue = RallocatedObject; // Object will be allocated here if it fits.
3574     Register RnewTopValue = R6_ARG4;
3575     Register RendValue    = R7_ARG5;
3576 
3577     // Check if we can allocate in the TLAB.
3578     __ ld(RoldTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3579     __ ld(RendValue,    in_bytes(JavaThread::tlab_end_offset()), R16_thread);
3580 
3581     __ add(RnewTopValue, Rinstance_size, RoldTopValue);
3582 
3583     // If there is enough space, we do not CAS and do not clear.
3584     __ cmpld(CCR0, RnewTopValue, RendValue);
3585     __ bgt(CCR0, allow_shared_alloc ? Lallocate_shared : Lslow_case);
3586 
3587     __ std(RnewTopValue, in_bytes(JavaThread::tlab_top_offset()), R16_thread);
3588 
3589     if (ZeroTLAB) {
3590       // The fields have already been cleared.
3591       __ b(Linitialize_header);
3592     } else {
3593       // Initialize both the header and fields.
3594       __ b(Linitialize_object);
3595     }
3596 
3597     // Fall through: TLAB was too small.
3598     if (allow_shared_alloc) {
3599       Register RtlabWasteLimitValue = R10_ARG8;
3600       Register RfreeValue = RnewTopValue;
3601 
3602       __ bind(Lallocate_shared);
3603       // Check if tlab should be discarded (refill_waste_limit >= free).
3604       __ ld(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
3605       __ subf(RfreeValue, RoldTopValue, RendValue);
3606       __ srdi(RfreeValue, RfreeValue, LogHeapWordSize); // in dwords
3607       __ cmpld(CCR0, RtlabWasteLimitValue, RfreeValue);
3608       __ bge(CCR0, Lslow_case);
3609 
3610       // Increment waste limit to prevent getting stuck on this slow path.
3611       __ addi(RtlabWasteLimitValue, RtlabWasteLimitValue, (int)ThreadLocalAllocBuffer::refill_waste_limit_increment());
3612       __ std(RtlabWasteLimitValue, in_bytes(JavaThread::tlab_refill_waste_limit_offset()), R16_thread);
3613     }
3614     // else: No allocation in the shared eden. // fallthru: __ b(Lslow_case);
3615   }
3616   // else: Always go the slow path.
3617 
3618   // --------------------------------------------------------------------------
3619   // slow case
3620   __ bind(Lslow_case);
3621   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), Rcpool, Rindex);
3622 
3623   if (UseTLAB) {
3624     __ b(Ldone);
3625     // --------------------------------------------------------------------------
3626     // Init1: Zero out newly allocated memory.
3627 
3628     if (!ZeroTLAB || allow_shared_alloc) {
3629       // Clear object fields.
3630       __ bind(Linitialize_object);
3631 
3632       // Initialize remaining object fields.
3633       Register Rbase = Rtags;
3634       __ addi(Rinstance_size, Rinstance_size, 7 - (int)sizeof(oopDesc));
3635       __ addi(Rbase, RallocatedObject, sizeof(oopDesc));
3636       __ srdi(Rinstance_size, Rinstance_size, 3);
3637 
3638       // Clear out object skipping header. Also takes care of the zero-length case.
3639       __ clear_memory_doubleword(Rbase, Rinstance_size);
3640       // fallthru: __ b(Linitialize_header);
3641     }
3642 
3643     // --------------------------------------------------------------------------
3644     // Init2: Initialize the header: mark, klass
3645     __ bind(Linitialize_header);
3646 
3647     // Init mark.
3648     if (UseBiasedLocking) {
3649       __ ld(Rscratch, in_bytes(Klass::prototype_header_offset()), RinstanceKlass);
3650     } else {
3651       __ load_const_optimized(Rscratch, markOopDesc::prototype(), R0);
3652     }
3653     __ std(Rscratch, oopDesc::mark_offset_in_bytes(), RallocatedObject);
3654 
3655     // Init klass.
3656     __ store_klass_gap(RallocatedObject);
3657     __ store_klass(RallocatedObject, RinstanceKlass, Rscratch); // klass (last for cms)
3658 
3659     // Check and trigger dtrace event.
3660     {
3661       SkipIfEqualZero skip_if(_masm, Rscratch, &DTraceAllocProbes);
3662       __ push(atos);
3663       __ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc));
3664       __ pop(atos);
3665     }
3666   }
3667 
3668   // continue
3669   __ bind(Ldone);
3670 
3671   // Must prevent reordering of stores for object initialization with stores that publish the new object.
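       // Illustrative race this prevents: after "a = new A()", another thread must
       // not observe the reference 'a' before the initializing stores above.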
3672   __ membar(Assembler::StoreStore);
3673 }
3674 
3675 void TemplateTable::newarray() {
3676   transition(itos, atos);
3677 
3678   __ lbz(R4, 1, R14_bcp);
3679   __ extsw(R5, R17_tos);
3680   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::newarray), R4, R5 /* size */);
3681 
3682   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3683   __ membar(Assembler::StoreStore);
3684 }
3685 
3686 void TemplateTable::anewarray() {
3687   transition(itos, atos);
3688 
3689   __ get_constant_pool(R4);
3690   __ get_2_byte_integer_at_bcp(1, R5, InterpreterMacroAssembler::Unsigned);
3691   __ extsw(R6, R17_tos); // size
3692   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::anewarray), R4 /* pool */, R5 /* index */, R6 /* size */);
3693 
3694   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3695   __ membar(Assembler::StoreStore);
3696 }
3697 
3698 // Allocate a multi-dimensional array.
3699 void TemplateTable::multianewarray() {
3700   transition(vtos, atos);
3701 
3702   Register Rptr = R31; // Needs to survive C call.
3703 
3704   // Put ndims * wordSize into frame temp slot
3705   __ lbz(Rptr, 3, R14_bcp);
3706   __ sldi(Rptr, Rptr, Interpreter::logStackElementSize);
3707   // Esp points past last_dim, so set R4 to first_dim address.
3708   __ add(R4, Rptr, R15_esp);
3709   call_VM(R17_tos, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), R4 /* first_size_address */);
3710   // Pop all dimensions off the stack.
3711   __ add(R15_esp, Rptr, R15_esp);
3712 
3713   // Must prevent reordering of stores for object initialization with stores that publish the new object.
3714   __ membar(Assembler::StoreStore);
3715 }
3716 
3717 void TemplateTable::arraylength() {
3718   transition(atos, itos);
3719 
3721   __ verify_oop(R17_tos);
3722   __ null_check_throw(R17_tos, arrayOopDesc::length_offset_in_bytes(), R11_scratch1);
3723   __ lwa(R17_tos, arrayOopDesc::length_offset_in_bytes(), R17_tos);
3724 }
3725 
3726 // ============================================================================
3727 // Typechecks
3728 
3729 void TemplateTable::checkcast() {
3730   transition(atos, atos);
3731 
3732   Label Ldone, Lis_null, Lquicked, Lresolved;
3733   Register Roffset         = R6_ARG4,
3734            RobjKlass       = R4_ARG2,
3735            RspecifiedKlass = R5_ARG3, // Generate_ClassCastException_verbose_handler will read value from this register.
3736            Rcpool          = R11_scratch1,
3737            Rtags           = R12_scratch2;
3738 
3739   // Null does not pass.
3740   __ cmpdi(CCR0, R17_tos, 0);
3741   __ beq(CCR0, Lis_null);
3742 
3743   // Get constant pool tag to find out if the bytecode has already been "quickened".
3744   __ get_cpool_and_tags(Rcpool, Rtags);
3745 
3746   __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3747 
3748   __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3749   __ lbzx(Rtags, Rtags, Roffset);
3750 
3751   __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3752   __ beq(CCR0, Lquicked);
3753 
3754   // Call into the VM to "quicken" instanceof.
3755   __ push_ptr();  // for GC
3756   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3757   __ get_vm_result_2(RspecifiedKlass);
3758   __ pop_ptr();   // Restore receiver.
3759   __ b(Lresolved);
3760 
3761   // Extract target class from constant pool.
3762   __ bind(Lquicked);
3763   __ sldi(Roffset, Roffset, LogBytesPerWord);
3764   __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
3765   __ isync(); // Order load of specified Klass wrt. tags.
3766   __ ldx(RspecifiedKlass, Rcpool, Roffset);
3767 
3768   // Do the checkcast.
3769   __ bind(Lresolved);
3770   // Get value klass in RobjKlass.
3771   __ load_klass(RobjKlass, R17_tos);
3772   // Generate a fast subtype check. Branches to Ldone if the check succeeds; falls through on failure.
3773   __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3774 
3775   // Not a subtype => must throw an exception.
3776   // Target class oop is in register R6_ARG4 == RspecifiedKlass by convention.
3777   __ load_dispatch_table(R11_scratch1, (address*)Interpreter::_throw_ClassCastException_entry);
3778   __ mtctr(R11_scratch1);
3779   __ bctr();
3780 
3781   // Profile the null case.
3782   __ align(32, 12);
3783   __ bind(Lis_null);
3784   __ profile_null_seen(R11_scratch1, Rtags); // Rtags used as scratch.
3785 
3786   __ align(32, 12);
3787   __ bind(Ldone);
3788 }
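
     // Java view of the two type checks (illustration):
     //   (T) obj            -> checkcast:  throws ClassCastException on failure
     //   obj instanceof T   -> instanceof: pushes 1 or 0, never throws; null yields 0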
3789 
3790 // Output:
3791 //   - tos == 0: Obj was null or not an instance of class.
3792 //   - tos == 1: Obj was an instance of class.
3793 void TemplateTable::instanceof() {
3794   transition(atos, itos);
3795 
3796   Label Ldone, Lis_null, Lquicked, Lresolved;
3797   Register Roffset         = R5_ARG3,
3798            RobjKlass       = R4_ARG2,
3799            RspecifiedKlass = R6_ARG4, // Generate_ClassCastException_verbose_handler will expect the value in this register.
3800            Rcpool          = R11_scratch1,
3801            Rtags           = R12_scratch2;
3802 
3803   // Null does not pass.
3804   __ cmpdi(CCR0, R17_tos, 0);
3805   __ beq(CCR0, Lis_null);
3806 
3807   // Get constant pool tag to find out if the bytecode has already been "quickened".
3808   __ get_cpool_and_tags(Rcpool, Rtags);
3809 
3810   __ get_2_byte_integer_at_bcp(1, Roffset, InterpreterMacroAssembler::Unsigned);
3811 
3812   __ addi(Rtags, Rtags, Array<u1>::base_offset_in_bytes());
3813   __ lbzx(Rtags, Rtags, Roffset);
3814 
3815   __ cmpdi(CCR0, Rtags, JVM_CONSTANT_Class);
3816   __ beq(CCR0, Lquicked);
3817 
3818   // Call into the VM to "quicken" instanceof.
3819   __ push_ptr();  // for GC
3820   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
3821   __ get_vm_result_2(RspecifiedKlass);
3822   __ pop_ptr();   // Restore receiver.
3823   __ b(Lresolved);
3824 
3825   // Extract target class from constant pool.
3826   __ bind(Lquicked);
3827   __ sldi(Roffset, Roffset, LogBytesPerWord);
3828   __ addi(Rcpool, Rcpool, sizeof(ConstantPool));
3829   __ isync(); // Order load of specified Klass wrt. tags.
3830   __ ldx(RspecifiedKlass, Rcpool, Roffset);
3831 
3832   // Do the checkcast.
3833   __ bind(Lresolved);
3834   // Get value klass in RobjKlass.
3835   __ load_klass(RobjKlass, R17_tos);
3836   // Generate a fast subtype check. Branches to Ldone (with tos == 1) if obj is a subtype; otherwise falls through and sets tos = 0.
3837   __ li(R17_tos, 1);
3838   __ gen_subtype_check(RobjKlass, RspecifiedKlass, /*3 temp regs*/ Roffset, Rcpool, Rtags, /*target if subtype*/ Ldone);
3839   __ li(R17_tos, 0);
3840 
3841   if (ProfileInterpreter) {
3842     __ b(Ldone);
3843   }
3844 
3845   // Profile the null case.
3846   __ align(32, 12);
3847   __ bind(Lis_null);
3848   __ profile_null_seen(Rcpool, Rtags); // Rcpool and Rtags used as scratch.
3849 
3850   __ align(32, 12);
3851   __ bind(Ldone);
3852 }
3853 
3854 // =============================================================================
3855 // Breakpoints
3856 
3857 void TemplateTable::_breakpoint() {
3858   transition(vtos, vtos);
3859 
3860   // Get the unpatched byte code.
3861   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), R19_method, R14_bcp);
3862   __ mr(R31, R3_RET);
3863 
3864   // Post the breakpoint event.
3865   __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::_breakpoint), R19_method, R14_bcp);
3866 
3867   // Complete the execution of original bytecode.
3868   __ dispatch_Lbyte_code(vtos, R31, Interpreter::normal_table(vtos));
3869 }
3870 
3871 // =============================================================================
3872 // Exceptions
3873 
3874 void TemplateTable::athrow() {
3875   transition(atos, vtos);
3876 
3877   // Exception oop is in tos
3878   __ verify_oop(R17_tos);
3879 
3880   __ null_check_throw(R17_tos, -1, R11_scratch1);
3881 
3882   // Throw exception interpreter entry expects exception oop to be in R3.
3883   __ mr(R3_RET, R17_tos);
3884   __ load_dispatch_table(R11_scratch1, (address*)Interpreter::throw_exception_entry());
3885   __ mtctr(R11_scratch1);
3886   __ bctr();
3887 }
3888 
3889 // =============================================================================
3890 // Synchronization
3891 // Searches the basic object lock list on the stack for a free slot
3892 // and uses it to lock the object in tos.
3893 //
3894 // Recursive locking is enabled by exiting the search if the same
3895 // object is already found in the list. Thus, a new BasicObjectLock
3896 // is allocated "higher up" in the stack and is found first
3897 // at the next monitor exit.
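     //
     // Monitor area layout in the interpreter frame (sketch; new monitors are
     // allocated towards lower addresses, each slot is a BasicObjectLock
     // consisting of a displaced header word and the object pointer):
     //   [monitor base]    <- just below the ijava state (Rlimit below)
     //   ...
     //   [topmost monitor] <- R26_monitor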
3898 void TemplateTable::monitorenter() {
3899   transition(atos, vtos);
3900 
3901   __ verify_oop(R17_tos);
3902 
3903   Register Rcurrent_monitor  = R11_scratch1,
3904            Rcurrent_obj      = R12_scratch2,
3905            Robj_to_lock      = R17_tos,
3906            Rscratch1         = R3_ARG1,
3907            Rscratch2         = R4_ARG2,
3908            Rscratch3         = R5_ARG3,
3909            Rcurrent_obj_addr = R6_ARG4;
3910 
3911   // ------------------------------------------------------------------------------
3912   // Null pointer exception.
3913   __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
3914 
3915   // Try to acquire a lock on the object.
3916   // Repeat until succeeded (i.e., until monitorenter returns true).
3917 
3918   // ------------------------------------------------------------------------------
3919   // Find a free slot in the monitor block.
3920   Label Lfound, Lexit, Lallocate_new;
3921   ConditionRegister found_free_slot = CCR0,
3922                     found_same_obj  = CCR1,
3923                     reached_limit   = CCR6;
3924   {
3925     Label Lloop, Lentry;
3926     Register Rlimit = Rcurrent_monitor;
3927 
3928     // Set up search loop - start with topmost monitor.
3929     __ add(Rcurrent_obj_addr, BasicObjectLock::obj_offset_in_bytes(), R26_monitor);
3930 
3931     __ ld(Rlimit, 0, R1_SP);
3932     __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes() - BasicObjectLock::obj_offset_in_bytes())); // Monitor base
3933 
3934     // Check if any slot is present => shortcut to allocation if not.
3935     __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
3936     __ bgt(reached_limit, Lallocate_new);
3937 
3938     // Pre-load topmost slot.
3939     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
3940     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
3941     // The search loop.
3942     __ bind(Lloop);
3943     // Found free slot?
3944     __ cmpdi(found_free_slot, Rcurrent_obj, 0);
3945     // Is this entry for same obj? If so, stop the search and take the found
3946     // free slot or allocate a new one to enable recursive locking.
3947     __ cmpd(found_same_obj, Rcurrent_obj, Robj_to_lock);
3948     __ cmpld(reached_limit, Rcurrent_obj_addr, Rlimit);
3949     __ beq(found_free_slot, Lexit);
3950     __ beq(found_same_obj, Lallocate_new);
3951     __ bgt(reached_limit, Lallocate_new);
3952     // Check if the last allocated BasicObjectLock has been reached.
3953     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
3954     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
3955     // Next iteration if unchecked BasicObjectLocks exist on the stack.
3956     __ b(Lloop);
3957   }
3958 
3959   // ------------------------------------------------------------------------------
3960   // Check if we found a free slot.
3961   __ bind(Lexit);
3962 
3963   __ addi(Rcurrent_monitor, Rcurrent_obj_addr, -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
3964   __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, - frame::interpreter_frame_monitor_size() * wordSize);
3965   __ b(Lfound);
3966 
3967   // We didn't find a free BasicObjectLock => allocate one.
3968   __ align(32, 12);
3969   __ bind(Lallocate_new);
3970   __ add_monitor_to_stack(false, Rscratch1, Rscratch2);
3971   __ mr(Rcurrent_monitor, R26_monitor);
3972   __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
3973 
3974   // ------------------------------------------------------------------------------
3975   // We now have a slot to lock.
3976   __ bind(Lfound);
3977 
3978   // Increment bcp to point to the next bytecode, so exception handling for async. exceptions work correctly.
3979   // The object has already been popped from the stack, so the expression stack looks correct.
3980   __ addi(R14_bcp, R14_bcp, 1);
3981 
3982   __ std(Robj_to_lock, 0, Rcurrent_obj_addr);
3983   __ lock_object(Rcurrent_monitor, Robj_to_lock);
3984 
3985   // Check if there's enough space on the stack for the monitors after locking.
3986   Label Lskip_stack_check;
3987   // Optimization: If the monitors stack section is less than a standard page size (4K) don't run
3988   // the stack check. There should be enough shadow pages to fit that in.
3989   __ ld(Rscratch3, 0, R1_SP);
3990   __ sub(Rscratch3, Rscratch3, R26_monitor);
3991   __ cmpdi(CCR0, Rscratch3, 4*K);
3992   __ blt(CCR0, Lskip_stack_check);
3993 
3994   DEBUG_ONLY(__ untested("stack overflow check during monitor enter");)
3995   __ li(Rscratch1, 0);
3996   __ generate_stack_overflow_check_with_compare_and_throw(Rscratch1, Rscratch2);
3997 
3998   __ align(32, 12);
3999   __ bind(Lskip_stack_check);
4000 
4001   // The bcp has already been incremented. Just need to dispatch to next instruction.
4002   __ dispatch_next(vtos);
4003 }
4004 
4005 void TemplateTable::monitorexit() {
4006   transition(atos, vtos);
4007   __ verify_oop(R17_tos);
4008 
4009   Register Rcurrent_monitor  = R11_scratch1,
4010            Rcurrent_obj      = R12_scratch2,
4011            Robj_to_lock      = R17_tos,
4012            Rcurrent_obj_addr = R3_ARG1,
4013            Rlimit            = R4_ARG2;
4014   Label Lfound, Lillegal_monitor_state;
4015 
4016   // Check corner case: unbalanced monitorEnter / Exit.
4017   __ ld(Rlimit, 0, R1_SP);
4018   __ addi(Rlimit, Rlimit, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
4019 
4020   // Null pointer check.
4021   __ null_check_throw(Robj_to_lock, -1, R11_scratch1);
4022 
4023   __ cmpld(CCR0, R26_monitor, Rlimit);
4024   __ bgt(CCR0, Lillegal_monitor_state);
4025 
4026   // Find the corresponding slot in the monitors stack section.
4027   {
4028     Label Lloop;
4029 
4030     // Start with topmost monitor.
4031     __ addi(Rcurrent_obj_addr, R26_monitor, BasicObjectLock::obj_offset_in_bytes());
4032     __ addi(Rlimit, Rlimit, BasicObjectLock::obj_offset_in_bytes());
4033     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4034     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4035 
4036     __ bind(Lloop);
4037     // Is this entry for same obj?
4038     __ cmpd(CCR0, Rcurrent_obj, Robj_to_lock);
4039     __ beq(CCR0, Lfound);
4040 
4041     // Check if the last allocated BasicObjectLock has been reached.
4042 
4043     __ ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
4044     __ cmpld(CCR0, Rcurrent_obj_addr, Rlimit);
4045     __ addi(Rcurrent_obj_addr, Rcurrent_obj_addr, frame::interpreter_frame_monitor_size() * wordSize);
4046 
4047     // Next iteration if unchecked BasicObjectLocks exist on the stack.
4048     __ ble(CCR0, Lloop);
4049   }
4050 
4051   // Fell through without finding the basic obj lock => throw up!
4052   __ bind(Lillegal_monitor_state);
4053   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
4054   __ should_not_reach_here();
4055 
4056   __ align(32, 12);
4057   __ bind(Lfound);
4058   __ addi(Rcurrent_monitor, Rcurrent_obj_addr,
4059           -(frame::interpreter_frame_monitor_size() * wordSize) - BasicObjectLock::obj_offset_in_bytes());
4060   __ unlock_object(Rcurrent_monitor);
4061 }
4062 
4063 // ============================================================================
4064 // Wide bytecodes
4065 
4066 // Wide instructions. Simply redirects to the wide entry point for that instruction.
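     // Example: "wide iload 300" dispatches through Interpreter::_wentry_point to
     // iload's wide variant, which then reads a two-byte local index.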
4067 void TemplateTable::wide() {
4068   transition(vtos, vtos);
4069 
4070   const Register Rtable = R11_scratch1,
4071                  Rindex = R12_scratch2,
4072                  Rtmp   = R0;
4073 
4074   __ lbz(Rindex, 1, R14_bcp);
4075 
4076   __ load_dispatch_table(Rtable, Interpreter::_wentry_point);
4077 
4078   __ slwi(Rindex, Rindex, LogBytesPerWord);
4079   __ ldx(Rtmp, Rtable, Rindex);
4080   __ mtctr(Rtmp);
4081   __ bctr();
4082   // Note: the bcp increment step is part of the individual wide bytecode implementations.
4083 }
4084 #endif // !CC_INTERP