1 /*
   2  * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
   3  * Copyright 2012, 2015 SAP AG. All rights reserved.
   4  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   5  *
   6  * This code is free software; you can redistribute it and/or modify it
   7  * under the terms of the GNU General Public License version 2 only, as
   8  * published by the Free Software Foundation.
   9  *
  10  * This code is distributed in the hope that it will be useful, but WITHOUT
  11  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  12  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 
  27 #include "precompiled.hpp"
  28 #include "asm/macroAssembler.inline.hpp"
  29 #include "interp_masm_ppc_64.hpp"
  30 #include "interpreter/interpreterRuntime.hpp"
  31 #include "prims/jvmtiThreadState.hpp"
  32 #include "runtime/sharedRuntime.hpp"
  33 
  34 #ifdef PRODUCT
  35 #define BLOCK_COMMENT(str) // nothing
  36 #else
  37 #define BLOCK_COMMENT(str) block_comment(str)
  38 #endif
  39 
  40 void InterpreterMacroAssembler::null_check_throw(Register a, int offset, Register temp_reg) {
  41 #ifdef CC_INTERP
  42   address exception_entry = StubRoutines::throw_NullPointerException_at_call_entry();
  43 #else
  44   address exception_entry = Interpreter::throw_NullPointerException_entry();
  45 #endif
  46   MacroAssembler::null_check_throw(a, offset, temp_reg, exception_entry);
  47 }
  48 
  49 void InterpreterMacroAssembler::branch_to_entry(address entry, Register Rscratch) {
  50   assert(entry, "Entry must have been generated by now");
  51   if (is_within_range_of_b(entry, pc())) {
  52     b(entry);
  53   } else {
  54     load_const_optimized(Rscratch, entry, R0);
  55     mtctr(Rscratch);
  56     bctr();
  57   }
  58 }
  59 
  60 #ifndef CC_INTERP
  61 
  62 void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
  63   Register bytecode = R12_scratch2;
  64   if (bcp_incr != 0) {
  65     lbzu(bytecode, bcp_incr, R14_bcp);
  66   } else {
  67     lbz(bytecode, 0, R14_bcp);
  68   }
  69 
  70   dispatch_Lbyte_code(state, bytecode, Interpreter::dispatch_table(state));
  71 }
  72 
  73 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
  74   // Load current bytecode.
  75   Register bytecode = R12_scratch2;
  76   lbz(bytecode, 0, R14_bcp);
  77   dispatch_Lbyte_code(state, bytecode, table);
  78 }
  79 
  80 // Dispatch code executed in the prolog of a bytecode which does not do it's
  81 // own dispatch. The dispatch address is computed and placed in R24_dispatch_addr.
  82 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  83   Register bytecode = R12_scratch2;
  84   lbz(bytecode, bcp_incr, R14_bcp);
  85 
  86   load_dispatch_table(R24_dispatch_addr, Interpreter::dispatch_table(state));
  87 
  88   sldi(bytecode, bytecode, LogBytesPerWord);
  89   ldx(R24_dispatch_addr, R24_dispatch_addr, bytecode);
  90 }
  91 
  92 // Dispatch code executed in the epilog of a bytecode which does not do it's
  93 // own dispatch. The dispatch address in R24_dispatch_addr is used for the
  94 // dispatch.
  95 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  96   mtctr(R24_dispatch_addr);
  97   addi(R14_bcp, R14_bcp, bcp_incr);
  98   bctr();
  99 }
 100 
 101 void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
 102   assert(scratch_reg != R0, "can't use R0 as scratch_reg here");
 103   if (JvmtiExport::can_pop_frame()) {
 104     Label L;
 105 
 106     // Check the "pending popframe condition" flag in the current thread.
 107     lwz(scratch_reg, in_bytes(JavaThread::popframe_condition_offset()), R16_thread);
 108 
 109     // Initiate popframe handling only if it is not already being
 110     // processed. If the flag has the popframe_processing bit set, it
 111     // means that this code is called *during* popframe handling - we
 112     // don't want to reenter.
 113     andi_(R0, scratch_reg, JavaThread::popframe_pending_bit);
 114     beq(CCR0, L);
 115 
 116     andi_(R0, scratch_reg, JavaThread::popframe_processing_bit);
 117     bne(CCR0, L);
 118 
 119     // Call the Interpreter::remove_activation_preserving_args_entry()
 120     // func to get the address of the same-named entrypoint in the
 121     // generated interpreter code.
 122 #if defined(ABI_ELFv2)
 123     call_c(CAST_FROM_FN_PTR(address,
 124                             Interpreter::remove_activation_preserving_args_entry),
 125            relocInfo::none);
 126 #else
 127     call_c(CAST_FROM_FN_PTR(FunctionDescriptor*,
 128                             Interpreter::remove_activation_preserving_args_entry),
 129            relocInfo::none);
 130 #endif
 131 
 132     // Jump to Interpreter::_remove_activation_preserving_args_entry.
 133     mtctr(R3_RET);
 134     bctr();
 135 
 136     align(32, 12);
 137     bind(L);
 138   }
 139 }
 140 
 141 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
 142   const Register Rthr_state_addr = scratch_reg;
 143   if (JvmtiExport::can_force_early_return()) {
 144     Label Lno_early_ret;
 145     ld(Rthr_state_addr, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
 146     cmpdi(CCR0, Rthr_state_addr, 0);
 147     beq(CCR0, Lno_early_ret);
 148 
 149     lwz(R0, in_bytes(JvmtiThreadState::earlyret_state_offset()), Rthr_state_addr);
 150     cmpwi(CCR0, R0, JvmtiThreadState::earlyret_pending);
 151     bne(CCR0, Lno_early_ret);
 152 
 153     // Jump to Interpreter::_earlyret_entry.
 154     lwz(R3_ARG1, in_bytes(JvmtiThreadState::earlyret_tos_offset()), Rthr_state_addr);
 155     call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry));
 156     mtlr(R3_RET);
 157     blr();
 158 
 159     align(32, 12);
 160     bind(Lno_early_ret);
 161   }
 162 }
 163 
 164 void InterpreterMacroAssembler::load_earlyret_value(TosState state, Register Rscratch1) {
 165   const Register RjvmtiState = Rscratch1;
 166   const Register Rscratch2   = R0;
 167 
 168   ld(RjvmtiState, in_bytes(JavaThread::jvmti_thread_state_offset()), R16_thread);
 169   li(Rscratch2, 0);
 170 
 171   switch (state) {
 172     case atos: ld(R17_tos, in_bytes(JvmtiThreadState::earlyret_oop_offset()), RjvmtiState);
 173                std(Rscratch2, in_bytes(JvmtiThreadState::earlyret_oop_offset()), RjvmtiState);
 174                break;
 175     case ltos: ld(R17_tos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
 176                break;
 177     case btos: // fall through
 178     case ctos: // fall through
 179     case stos: // fall through
 180     case itos: lwz(R17_tos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
 181                break;
 182     case ftos: lfs(F15_ftos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
 183                break;
 184     case dtos: lfd(F15_ftos, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
 185                break;
 186     case vtos: break;
 187     default  : ShouldNotReachHere();
 188   }
 189 
 190   // Clean up tos value in the jvmti thread state.
 191   std(Rscratch2, in_bytes(JvmtiThreadState::earlyret_value_offset()), RjvmtiState);
 192   // Set tos state field to illegal value.
 193   li(Rscratch2, ilgl);
 194   stw(Rscratch2, in_bytes(JvmtiThreadState::earlyret_tos_offset()), RjvmtiState);
 195 }
 196 
 197 // Common code to dispatch and dispatch_only.
 198 // Dispatch value in Lbyte_code and increment Lbcp.
 199 
 200 void InterpreterMacroAssembler::load_dispatch_table(Register dst, address* table) {
 201   address table_base = (address)Interpreter::dispatch_table((TosState)0);
 202   intptr_t table_offs = (intptr_t)table - (intptr_t)table_base;
 203   if (is_simm16(table_offs)) {
 204     addi(dst, R25_templateTableBase, (int)table_offs);
 205   } else {
 206     load_const_optimized(dst, table, R0);
 207   }
 208 }
 209 
 210 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, Register bytecode, address* table, bool verify) {
 211   if (verify) {
 212     unimplemented("dispatch_Lbyte_code: verify"); // See Sparc Implementation to implement this
 213   }
 214 
 215 #ifdef FAST_DISPATCH
 216   unimplemented("dispatch_Lbyte_code FAST_DISPATCH");
 217 #else
 218   assert_different_registers(bytecode, R11_scratch1);
 219 
 220   // Calc dispatch table address.
 221   load_dispatch_table(R11_scratch1, table);
 222 
 223   sldi(R12_scratch2, bytecode, LogBytesPerWord);
 224   ldx(R11_scratch1, R11_scratch1, R12_scratch2);
 225 
 226   // Jump off!
 227   mtctr(R11_scratch1);
 228   bctr();
 229 #endif
 230 }
 231 
 232 void InterpreterMacroAssembler::load_receiver(Register Rparam_count, Register Rrecv_dst) {
 233   sldi(Rrecv_dst, Rparam_count, Interpreter::logStackElementSize);
 234   ldx(Rrecv_dst, Rrecv_dst, R15_esp);
 235 }
 236 
 237 // helpers for expression stack
 238 
 239 void InterpreterMacroAssembler::pop_i(Register r) {
 240   lwzu(r, Interpreter::stackElementSize, R15_esp);
 241 }
 242 
 243 void InterpreterMacroAssembler::pop_ptr(Register r) {
 244   ldu(r, Interpreter::stackElementSize, R15_esp);
 245 }
 246 
 247 void InterpreterMacroAssembler::pop_l(Register r) {
 248   ld(r, Interpreter::stackElementSize, R15_esp);
 249   addi(R15_esp, R15_esp, 2 * Interpreter::stackElementSize);
 250 }
 251 
 252 void InterpreterMacroAssembler::pop_f(FloatRegister f) {
 253   lfsu(f, Interpreter::stackElementSize, R15_esp);
 254 }
 255 
 256 void InterpreterMacroAssembler::pop_d(FloatRegister f) {
 257   lfd(f, Interpreter::stackElementSize, R15_esp);
 258   addi(R15_esp, R15_esp, 2 * Interpreter::stackElementSize);
 259 }
 260 
 261 void InterpreterMacroAssembler::push_i(Register r) {
 262   stw(r, 0, R15_esp);
 263   addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
 264 }
 265 
 266 void InterpreterMacroAssembler::push_ptr(Register r) {
 267   std(r, 0, R15_esp);
 268   addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
 269 }
 270 
 271 void InterpreterMacroAssembler::push_l(Register r) {
 272   std(r, - Interpreter::stackElementSize, R15_esp);
 273   addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
 274 }
 275 
 276 void InterpreterMacroAssembler::push_f(FloatRegister f) {
 277   stfs(f, 0, R15_esp);
 278   addi(R15_esp, R15_esp, - Interpreter::stackElementSize );
 279 }
 280 
 281 void InterpreterMacroAssembler::push_d(FloatRegister f)   {
 282   stfd(f, - Interpreter::stackElementSize, R15_esp);
 283   addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
 284 }
 285 
 286 void InterpreterMacroAssembler::push_2ptrs(Register first, Register second) {
 287   std(first, 0, R15_esp);
 288   std(second, -Interpreter::stackElementSize, R15_esp);
 289   addi(R15_esp, R15_esp, - 2 * Interpreter::stackElementSize );
 290 }
 291 
 292 void InterpreterMacroAssembler::push_l_pop_d(Register l, FloatRegister d) {
 293   std(l, 0, R15_esp);
 294   lfd(d, 0, R15_esp);
 295 }
 296 
 297 void InterpreterMacroAssembler::push_d_pop_l(FloatRegister d, Register l) {
 298   stfd(d, 0, R15_esp);
 299   ld(l, 0, R15_esp);
 300 }
 301 
 302 void InterpreterMacroAssembler::push(TosState state) {
 303   switch (state) {
 304     case atos: push_ptr();                break;
 305     case btos:
 306     case ctos:
 307     case stos:
 308     case itos: push_i();                  break;
 309     case ltos: push_l();                  break;
 310     case ftos: push_f();                  break;
 311     case dtos: push_d();                  break;
 312     case vtos: /* nothing to do */        break;
 313     default  : ShouldNotReachHere();
 314   }
 315 }
 316 
 317 void InterpreterMacroAssembler::pop(TosState state) {
 318   switch (state) {
 319     case atos: pop_ptr();            break;
 320     case btos:
 321     case ctos:
 322     case stos:
 323     case itos: pop_i();              break;
 324     case ltos: pop_l();              break;
 325     case ftos: pop_f();              break;
 326     case dtos: pop_d();              break;
 327     case vtos: /* nothing to do */   break;
 328     default  : ShouldNotReachHere();
 329   }
 330   verify_oop(R17_tos, state);
 331 }
 332 
 333 void InterpreterMacroAssembler::empty_expression_stack() {
 334   addi(R15_esp, R26_monitor, - Interpreter::stackElementSize);
 335 }
 336 
 337 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(int         bcp_offset,
 338                                                           Register    Rdst,
 339                                                           signedOrNot is_signed) {
 340 #if defined(VM_LITTLE_ENDIAN)
 341   if (bcp_offset) {
 342     load_const_optimized(Rdst, bcp_offset);
 343     lhbrx(Rdst, R14_bcp, Rdst);
 344   } else {
 345     lhbrx(Rdst, R14_bcp);
 346   }
 347   if (is_signed == Signed) {
 348     extsh(Rdst, Rdst);
 349   }
 350 #else
 351   // Read Java big endian format.
 352   if (is_signed == Signed) {
 353     lha(Rdst, bcp_offset, R14_bcp);
 354   } else {
 355     lhz(Rdst, bcp_offset, R14_bcp);
 356   }
 357 #endif
 358 }
 359 
 360 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(int         bcp_offset,
 361                                                           Register    Rdst,
 362                                                           signedOrNot is_signed) {
 363 #if defined(VM_LITTLE_ENDIAN)
 364   if (bcp_offset) {
 365     load_const_optimized(Rdst, bcp_offset);
 366     lwbrx(Rdst, R14_bcp, Rdst);
 367   } else {
 368     lwbrx(Rdst, R14_bcp);
 369   }
 370   if (is_signed == Signed) {
 371     extsw(Rdst, Rdst);
 372   }
 373 #else
 374   // Read Java big endian format.
 375   if (bcp_offset & 3) { // Offset unaligned?
 376     load_const_optimized(Rdst, bcp_offset);
 377     if (is_signed == Signed) {
 378       lwax(Rdst, R14_bcp, Rdst);
 379     } else {
 380       lwzx(Rdst, R14_bcp, Rdst);
 381     }
 382   } else {
 383     if (is_signed == Signed) {
 384       lwa(Rdst, bcp_offset, R14_bcp);
 385     } else {
 386       lwz(Rdst, bcp_offset, R14_bcp);
 387     }
 388   }
 389 #endif
 390 }
 391 
 392 
 393 // Load the constant pool cache index from the bytecode stream.
 394 //
 395 // Kills / writes:
 396 //   - Rdst, Rscratch
 397 void InterpreterMacroAssembler::get_cache_index_at_bcp(Register Rdst, int bcp_offset, size_t index_size) {
 398   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 399   // Cache index is always in the native format, courtesy of Rewriter.
 400   if (index_size == sizeof(u2)) {
 401     lhz(Rdst, bcp_offset, R14_bcp);
 402   } else if (index_size == sizeof(u4)) {
 403     if (bcp_offset & 3) {
 404       load_const_optimized(Rdst, bcp_offset);
 405       lwax(Rdst, R14_bcp, Rdst);
 406     } else {
 407       lwa(Rdst, bcp_offset, R14_bcp);
 408     }
 409     assert(ConstantPool::decode_invokedynamic_index(~123) == 123, "else change next line");
 410     nand(Rdst, Rdst, Rdst); // convert to plain index
 411   } else if (index_size == sizeof(u1)) {
 412     lbz(Rdst, bcp_offset, R14_bcp);
 413   } else {
 414     ShouldNotReachHere();
 415   }
 416   // Rdst now contains cp cache index.
 417 }
 418 
 419 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, int bcp_offset, size_t index_size) {
 420   get_cache_index_at_bcp(cache, bcp_offset, index_size);
 421   sldi(cache, cache, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord));
 422   add(cache, R27_constPoolCache, cache);
 423 }
 424 
 425 // Load 4-byte signed or unsigned integer in Java format (that is, big-endian format)
 426 // from (Rsrc)+offset.
 427 void InterpreterMacroAssembler::get_u4(Register Rdst, Register Rsrc, int offset,
 428                                        signedOrNot is_signed) {
 429 #if defined(VM_LITTLE_ENDIAN)
 430   if (offset) {
 431     load_const_optimized(Rdst, offset);
 432     lwbrx(Rdst, Rdst, Rsrc);
 433   } else {
 434     lwbrx(Rdst, Rsrc);
 435   }
 436   if (is_signed == Signed) {
 437     extsw(Rdst, Rdst);
 438   }
 439 #else
 440   if (is_signed == Signed) {
 441     lwa(Rdst, offset, Rsrc);
 442   } else {
 443     lwz(Rdst, offset, Rsrc);
 444   }
 445 #endif
 446 }
 447 
 448 // Load object from cpool->resolved_references(index).
 449 void InterpreterMacroAssembler::load_resolved_reference_at_index(Register result, Register index, Label *is_null) {
 450   assert_different_registers(result, index);
 451   get_constant_pool(result);
 452 
 453   // Convert from field index to resolved_references() index and from
 454   // word index to byte offset. Since this is a java object, it can be compressed.
 455   Register tmp = index;  // reuse
 456   sldi(tmp, index, LogBytesPerHeapOop);
 457   // Load pointer for resolved_references[] objArray.
 458   ld(result, ConstantPool::resolved_references_offset_in_bytes(), result);
 459   // JNIHandles::resolve(result)
 460   ld(result, 0, result);
 461 #ifdef ASSERT
 462   Label index_ok;
 463   lwa(R0, arrayOopDesc::length_offset_in_bytes(), result);
 464   sldi(R0, R0, LogBytesPerHeapOop);
 465   cmpd(CCR0, tmp, R0);
 466   blt(CCR0, index_ok);
 467   stop("resolved reference index out of bounds", 0x09256);
 468   bind(index_ok);
 469 #endif
 470   // Add in the index.
 471   add(result, tmp, result);
 472   load_heap_oop(result, arrayOopDesc::base_offset_in_bytes(T_OBJECT), result, is_null);
 473 }
 474 
 475 // Generate a subtype check: branch to ok_is_subtype if sub_klass is
 476 // a subtype of super_klass. Blows registers Rsub_klass, tmp1, tmp2.
 477 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass, Register Rsuper_klass, Register Rtmp1,
 478                                                   Register Rtmp2, Register Rtmp3, Label &ok_is_subtype) {
 479   // Profile the not-null value's klass.
 480   profile_typecheck(Rsub_klass, Rtmp1, Rtmp2);
 481   check_klass_subtype(Rsub_klass, Rsuper_klass, Rtmp1, Rtmp2, ok_is_subtype);
 482   profile_typecheck_failed(Rtmp1, Rtmp2);
 483 }
 484 
 485 void InterpreterMacroAssembler::generate_stack_overflow_check_with_compare_and_throw(Register Rmem_frame_size, Register Rscratch1) {
 486   Label done;
 487   sub(Rmem_frame_size, R1_SP, Rmem_frame_size);
 488   ld(Rscratch1, thread_(stack_overflow_limit));
 489   cmpld(CCR0/*is_stack_overflow*/, Rmem_frame_size, Rscratch1);
 490   bgt(CCR0/*is_stack_overflow*/, done);
 491 
 492   // Load target address of the runtime stub.
 493   assert(StubRoutines::throw_StackOverflowError_entry() != NULL, "generated in wrong order");
 494   load_const_optimized(Rscratch1, (StubRoutines::throw_StackOverflowError_entry()), R0);
 495   mtctr(Rscratch1);
 496   // Restore caller_sp.
 497 #ifdef ASSERT
 498   ld(Rscratch1, 0, R1_SP);
 499   ld(R0, 0, R21_sender_SP);
 500   cmpd(CCR0, R0, Rscratch1);
 501   asm_assert_eq("backlink", 0x547);
 502 #endif // ASSERT
 503   mr(R1_SP, R21_sender_SP);
 504   bctr();
 505 
 506   align(32, 12);
 507   bind(done);
 508 }
 509 
 510 // Separate these two to allow for delay slot in middle.
 511 // These are used to do a test and full jump to exception-throwing code.
 512 
 513 // Check that index is in range for array, then shift index by index_shift,
 514 // and put arrayOop + shifted_index into res.
 515 // Note: res is still shy of address by array offset into object.
 516 
 517 void InterpreterMacroAssembler::index_check_without_pop(Register Rarray, Register Rindex, int index_shift, Register Rtmp, Register Rres) {
 518   // Check that index is in range for array, then shift index by index_shift,
 519   // and put arrayOop + shifted_index into res.
 520   // Note: res is still shy of address by array offset into object.
 521   // Kills:
 522   //   - Rindex
 523   // Writes:
 524   //   - Rres: Address that corresponds to the array index if check was successful.
 525   verify_oop(Rarray);
 526   const Register Rlength   = R0;
 527   const Register RsxtIndex = Rtmp;
 528   Label LisNull, LnotOOR;
 529 
 530   // Array nullcheck
 531   if (!ImplicitNullChecks) {
 532     cmpdi(CCR0, Rarray, 0);
 533     beq(CCR0, LisNull);
 534   } else {
 535     null_check_throw(Rarray, arrayOopDesc::length_offset_in_bytes(), /*temp*/RsxtIndex);
 536   }
 537 
 538   // Rindex might contain garbage in upper bits (remember that we don't sign extend
 539   // during integer arithmetic operations). So kill them and put value into same register
 540   // where ArrayIndexOutOfBounds would expect the index in.
 541   rldicl(RsxtIndex, Rindex, 0, 32); // zero extend 32 bit -> 64 bit
 542 
 543   // Index check
 544   lwz(Rlength, arrayOopDesc::length_offset_in_bytes(), Rarray);
 545   cmplw(CCR0, Rindex, Rlength);
 546   sldi(RsxtIndex, RsxtIndex, index_shift);
 547   blt(CCR0, LnotOOR);
 548   // Index should be in R17_tos, array should be in R4_ARG2.
 549   mr(R17_tos, Rindex);
 550   mr(R4_ARG2, Rarray);
 551   load_dispatch_table(Rtmp, (address*)Interpreter::_throw_ArrayIndexOutOfBoundsException_entry);
 552   mtctr(Rtmp);
 553   bctr();
 554 
 555   if (!ImplicitNullChecks) {
 556     bind(LisNull);
 557     load_dispatch_table(Rtmp, (address*)Interpreter::_throw_NullPointerException_entry);
 558     mtctr(Rtmp);
 559     bctr();
 560   }
 561 
 562   align(32, 16);
 563   bind(LnotOOR);
 564 
 565   // Calc address
 566   add(Rres, RsxtIndex, Rarray);
 567 }
 568 
 569 void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
 570   // pop array
 571   pop_ptr(array);
 572 
 573   // check array
 574   index_check_without_pop(array, index, index_shift, tmp, res);
 575 }
 576 
 577 void InterpreterMacroAssembler::get_const(Register Rdst) {
 578   ld(Rdst, in_bytes(Method::const_offset()), R19_method);
 579 }
 580 
 581 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
 582   get_const(Rdst);
 583   ld(Rdst, in_bytes(ConstMethod::constants_offset()), Rdst);
 584 }
 585 
 586 void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
 587   get_constant_pool(Rdst);
 588   ld(Rdst, ConstantPool::cache_offset_in_bytes(), Rdst);
 589 }
 590 
 591 void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
 592   get_constant_pool(Rcpool);
 593   ld(Rtags, ConstantPool::tags_offset_in_bytes(), Rcpool);
 594 }
 595 
 596 // Unlock if synchronized method.
 597 //
 598 // Unlock the receiver if this is a synchronized method.
 599 // Unlock any Java monitors from synchronized blocks.
 600 //
 601 // If there are locked Java monitors
 602 //   If throw_monitor_exception
 603 //     throws IllegalMonitorStateException
 604 //   Else if install_monitor_exception
 605 //     installs IllegalMonitorStateException
 606 //   Else
 607 //     no error processing
 608 void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
 609                                                               bool throw_monitor_exception,
 610                                                               bool install_monitor_exception) {
 611   Label Lunlocked, Lno_unlock;
 612   {
 613     Register Rdo_not_unlock_flag = R11_scratch1;
 614     Register Raccess_flags       = R12_scratch2;
 615 
 616     // Check if synchronized method or unlocking prevented by
 617     // JavaThread::do_not_unlock_if_synchronized flag.
 618     lbz(Rdo_not_unlock_flag, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread);
 619     lwz(Raccess_flags, in_bytes(Method::access_flags_offset()), R19_method);
 620     li(R0, 0);
 621     stb(R0, in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()), R16_thread); // reset flag
 622 
 623     push(state);
 624 
 625     // Skip if we don't have to unlock.
 626     rldicl_(R0, Raccess_flags, 64-JVM_ACC_SYNCHRONIZED_BIT, 63); // Extract bit and compare to 0.
 627     beq(CCR0, Lunlocked);
 628 
 629     cmpwi(CCR0, Rdo_not_unlock_flag, 0);
 630     bne(CCR0, Lno_unlock);
 631   }
 632 
 633   // Unlock
 634   {
 635     Register Rmonitor_base = R11_scratch1;
 636 
 637     Label Lunlock;
 638     // If it's still locked, everything is ok, unlock it.
 639     ld(Rmonitor_base, 0, R1_SP);
 640     addi(Rmonitor_base, Rmonitor_base, - (frame::ijava_state_size + frame::interpreter_frame_monitor_size_in_bytes())); // Monitor base
 641 
 642     ld(R0, BasicObjectLock::obj_offset_in_bytes(), Rmonitor_base);
 643     cmpdi(CCR0, R0, 0);
 644     bne(CCR0, Lunlock);
 645 
 646     // If it's already unlocked, throw exception.
 647     if (throw_monitor_exception) {
 648       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
 649       should_not_reach_here();
 650     } else {
 651       if (install_monitor_exception) {
 652         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
 653         b(Lunlocked);
 654       }
 655     }
 656 
 657     bind(Lunlock);
 658     unlock_object(Rmonitor_base);
 659   }
 660 
 661   // Check that all other monitors are unlocked. Throw IllegelMonitorState exception if not.
 662   bind(Lunlocked);
 663   {
 664     Label Lexception, Lrestart;
 665     Register Rcurrent_obj_addr = R11_scratch1;
 666     const int delta = frame::interpreter_frame_monitor_size_in_bytes();
 667     assert((delta & LongAlignmentMask) == 0, "sizeof BasicObjectLock must be even number of doublewords");
 668 
 669     bind(Lrestart);
 670     // Set up search loop: Calc num of iterations.
 671     {
 672       Register Riterations = R12_scratch2;
 673       Register Rmonitor_base = Rcurrent_obj_addr;
 674       ld(Rmonitor_base, 0, R1_SP);
 675       addi(Rmonitor_base, Rmonitor_base, - frame::ijava_state_size);  // Monitor base
 676 
 677       subf_(Riterations, R26_monitor, Rmonitor_base);
 678       ble(CCR0, Lno_unlock);
 679 
 680       addi(Rcurrent_obj_addr, Rmonitor_base, BasicObjectLock::obj_offset_in_bytes() - frame::interpreter_frame_monitor_size_in_bytes());
 681       // Check if any monitor is on stack, bail out if not
 682       srdi(Riterations, Riterations, exact_log2(delta));
 683       mtctr(Riterations);
 684     }
 685 
 686     // The search loop: Look for locked monitors.
 687     {
 688       const Register Rcurrent_obj = R0;
 689       Label Lloop;
 690 
 691       ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
 692       addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
 693       bind(Lloop);
 694 
 695       // Check if current entry is used.
 696       cmpdi(CCR0, Rcurrent_obj, 0);
 697       bne(CCR0, Lexception);
 698       // Preload next iteration's compare value.
 699       ld(Rcurrent_obj, 0, Rcurrent_obj_addr);
 700       addi(Rcurrent_obj_addr, Rcurrent_obj_addr, -delta);
 701       bdnz(Lloop);
 702     }
 703     // Fell through: Everything's unlocked => finish.
 704     b(Lno_unlock);
 705 
 706     // An object is still locked => need to throw exception.
 707     bind(Lexception);
 708     if (throw_monitor_exception) {
 709       call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
 710       should_not_reach_here();
 711     } else {
 712       // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
 713       // Unlock does not block, so don't have to worry about the frame.
 714       Register Rmonitor_addr = R11_scratch1;
 715       addi(Rmonitor_addr, Rcurrent_obj_addr, -BasicObjectLock::obj_offset_in_bytes() + delta);
 716       unlock_object(Rmonitor_addr);
 717       if (install_monitor_exception) {
 718         call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
 719       }
 720       b(Lrestart);
 721     }
 722   }
 723 
 724   align(32, 12);
 725   bind(Lno_unlock);
 726   pop(state);
 727 }
 728 
 729 // Support function for remove_activation & Co.
 730 void InterpreterMacroAssembler::merge_frames(Register Rsender_sp, Register return_pc, Register Rscratch1, Register Rscratch2) {
 731   // Pop interpreter frame.
 732   ld(Rscratch1, 0, R1_SP); // *SP
 733   ld(Rsender_sp, _ijava_state_neg(sender_sp), Rscratch1); // top_frame_sp
 734   ld(Rscratch2, 0, Rscratch1); // **SP
 735 #ifdef ASSERT
 736   {
 737     Label Lok;
 738     ld(R0, _ijava_state_neg(ijava_reserved), Rscratch1);
 739     cmpdi(CCR0, R0, 0x5afe);
 740     beq(CCR0, Lok);
 741     stop("frame corrupted (remove activation)", 0x5afe);
 742     bind(Lok);
 743   }
 744 #endif
 745   if (return_pc!=noreg) {
 746     ld(return_pc, _abi(lr), Rscratch1); // LR
 747   }
 748 
 749   // Merge top frames.
 750   subf(Rscratch1, R1_SP, Rsender_sp); // top_frame_sp - SP
 751   stdux(Rscratch2, R1_SP, Rscratch1); // atomically set *(SP = top_frame_sp) = **SP
 752 }
 753 
 754 // Remove activation.
 755 //
 756 // Unlock the receiver if this is a synchronized method.
 757 // Unlock any Java monitors from synchronized blocks.
 758 // Remove the activation from the stack.
 759 //
 760 // If there are locked Java monitors
 761 //    If throw_monitor_exception
 762 //       throws IllegalMonitorStateException
 763 //    Else if install_monitor_exception
 764 //       installs IllegalMonitorStateException
 765 //    Else
 766 //       no error processing
 767 void InterpreterMacroAssembler::remove_activation(TosState state,
 768                                                   bool throw_monitor_exception,
 769                                                   bool install_monitor_exception) {
 770   unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
 771 
 772   // Save result (push state before jvmti call and pop it afterwards) and notify jvmti.
 773   notify_method_exit(false, state, NotifyJVMTI, true);
 774 
 775   verify_oop(R17_tos, state);
 776   verify_thread();
 777 
 778   merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
 779   mtlr(R0);
 780 }
 781 
 782 #endif // !CC_INTERP
 783 
 784 // Lock object
 785 //
 786 // Registers alive
 787 //   monitor - Address of the BasicObjectLock to be used for locking,
 788 //             which must be initialized with the object to lock.
 789 //   object  - Address of the object to be locked.
 790 //
 791 void InterpreterMacroAssembler::lock_object(Register monitor, Register object) {
 792   if (UseHeavyMonitors) {
 793     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 794             monitor, /*check_for_exceptions=*/true CC_INTERP_ONLY(&& false));
 795   } else {
 796     // template code:
 797     //
 798     // markOop displaced_header = obj->mark().set_unlocked();
 799     // monitor->lock()->set_displaced_header(displaced_header);
 800     // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 801     //   // We stored the monitor address into the object's mark word.
 802     // } else if (THREAD->is_lock_owned((address)displaced_header))
 803     //   // Simple recursive case.
 804     //   monitor->lock()->set_displaced_header(NULL);
 805     // } else {
 806     //   // Slow path.
 807     //   InterpreterRuntime::monitorenter(THREAD, monitor);
 808     // }
 809 
 810     const Register displaced_header = R7_ARG5;
 811     const Register object_mark_addr = R8_ARG6;
 812     const Register current_header   = R9_ARG7;
 813     const Register tmp              = R10_ARG8;
 814 
 815     Label done;
 816     Label cas_failed, slow_case;
 817 
 818     assert_different_registers(displaced_header, object_mark_addr, current_header, tmp);
 819 
 820     // markOop displaced_header = obj->mark().set_unlocked();
 821 
 822     // Load markOop from object into displaced_header.
 823     ld(displaced_header, oopDesc::mark_offset_in_bytes(), object);
 824 
 825     if (UseBiasedLocking) {
 826       biased_locking_enter(CCR0, object, displaced_header, tmp, current_header, done, &slow_case);
 827     }
 828 
 829     // Set displaced_header to be (markOop of object | UNLOCK_VALUE).
 830     ori(displaced_header, displaced_header, markOopDesc::unlocked_value);
 831 
 832     // monitor->lock()->set_displaced_header(displaced_header);
 833 
 834     // Initialize the box (Must happen before we update the object mark!).
 835     std(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
 836         BasicLock::displaced_header_offset_in_bytes(), monitor);
 837 
 838     // if (Atomic::cmpxchg_ptr(/*ex=*/monitor, /*addr*/obj->mark_addr(), /*cmp*/displaced_header) == displaced_header) {
 839 
 840     // Store stack address of the BasicObjectLock (this is monitor) into object.
 841     addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
 842 
 843     // Must fence, otherwise, preceding store(s) may float below cmpxchg.
 844     // CmpxchgX sets CCR0 to cmpX(current, displaced).
 845     fence(); // TODO: replace by MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq ?
 846     cmpxchgd(/*flag=*/CCR0,
 847              /*current_value=*/current_header,
 848              /*compare_value=*/displaced_header, /*exchange_value=*/monitor,
 849              /*where=*/object_mark_addr,
 850              MacroAssembler::MemBarRel | MacroAssembler::MemBarAcq,
 851              MacroAssembler::cmpxchgx_hint_acquire_lock(),
 852              noreg,
 853              &cas_failed);
 854 
 855     // If the compare-and-exchange succeeded, then we found an unlocked
 856     // object and we have now locked it.
 857     b(done);
 858     bind(cas_failed);
 859 
 860     // } else if (THREAD->is_lock_owned((address)displaced_header))
 861     //   // Simple recursive case.
 862     //   monitor->lock()->set_displaced_header(NULL);
 863 
 864     // We did not see an unlocked object so try the fast recursive case.
 865 
 866     // Check if owner is self by comparing the value in the markOop of object
 867     // (current_header) with the stack pointer.
 868     sub(current_header, current_header, R1_SP);
 869 
 870     assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
 871     load_const_optimized(tmp,
 872                          (address) (~(os::vm_page_size()-1) |
 873                                     markOopDesc::lock_mask_in_place));
 874 
 875     and_(R0/*==0?*/, current_header, tmp);
 876     // If condition is true we are done and hence we can store 0 in the displaced
 877     // header indicating it is a recursive lock.
 878     bne(CCR0, slow_case);
 879     std(R0/*==0!*/, BasicObjectLock::lock_offset_in_bytes() +
 880         BasicLock::displaced_header_offset_in_bytes(), monitor);
 881     b(done);
 882 
 883     // } else {
 884     //   // Slow path.
 885     //   InterpreterRuntime::monitorenter(THREAD, monitor);
 886 
 887     // None of the above fast optimizations worked so we have to get into the
 888     // slow case of monitor enter.
 889     bind(slow_case);
 890     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter),
 891             monitor, /*check_for_exceptions=*/true CC_INTERP_ONLY(&& false));
 892     // }
 893     align(32, 12);
 894     bind(done);
 895   }
 896 }
 897 
 898 // Unlocks an object. Used in monitorexit bytecode and remove_activation.
 899 //
 900 // Registers alive
 901 //   monitor - Address of the BasicObjectLock to be used for locking,
 902 //             which must be initialized with the object to lock.
 903 //
 904 // Throw IllegalMonitorException if object is not locked by current thread.
 905 void InterpreterMacroAssembler::unlock_object(Register monitor, bool check_for_exceptions) {
 906   if (UseHeavyMonitors) {
 907     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
 908             monitor, check_for_exceptions CC_INTERP_ONLY(&& false));
 909   } else {
 910 
 911     // template code:
 912     //
 913     // if ((displaced_header = monitor->displaced_header()) == NULL) {
 914     //   // Recursive unlock. Mark the monitor unlocked by setting the object field to NULL.
 915     //   monitor->set_obj(NULL);
 916     // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
 917     //   // We swapped the unlocked mark in displaced_header into the object's mark word.
 918     //   monitor->set_obj(NULL);
 919     // } else {
 920     //   // Slow path.
 921     //   InterpreterRuntime::monitorexit(THREAD, monitor);
 922     // }
 923 
 924     const Register object           = R7_ARG5;
 925     const Register displaced_header = R8_ARG6;
 926     const Register object_mark_addr = R9_ARG7;
 927     const Register current_header   = R10_ARG8;
 928 
 929     Label free_slot;
 930     Label slow_case;
 931 
 932     assert_different_registers(object, displaced_header, object_mark_addr, current_header);
 933 
 934     if (UseBiasedLocking) {
 935       // The object address from the monitor is in object.
 936       ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor);
 937       assert(oopDesc::mark_offset_in_bytes() == 0, "offset of _mark is not 0");
 938       biased_locking_exit(CCR0, object, displaced_header, free_slot);
 939     }
 940 
 941     // Test first if we are in the fast recursive case.
 942     ld(displaced_header, BasicObjectLock::lock_offset_in_bytes() +
 943            BasicLock::displaced_header_offset_in_bytes(), monitor);
 944 
 945     // If the displaced header is zero, we have a recursive unlock.
 946     cmpdi(CCR0, displaced_header, 0);
 947     beq(CCR0, free_slot); // recursive unlock
 948 
 949     // } else if (Atomic::cmpxchg_ptr(displaced_header, obj->mark_addr(), monitor) == monitor) {
 950     //   // We swapped the unlocked mark in displaced_header into the object's mark word.
 951     //   monitor->set_obj(NULL);
 952 
 953     // If we still have a lightweight lock, unlock the object and be done.
 954 
 955     // The object address from the monitor is in object.
 956     if (!UseBiasedLocking) { ld(object, BasicObjectLock::obj_offset_in_bytes(), monitor); }
 957     addi(object_mark_addr, object, oopDesc::mark_offset_in_bytes());
 958 
 959     // We have the displaced header in displaced_header. If the lock is still
 960     // lightweight, it will contain the monitor address and we'll store the
 961     // displaced header back into the object's mark word.
 962     // CmpxchgX sets CCR0 to cmpX(current, monitor).
 963     cmpxchgd(/*flag=*/CCR0,
 964              /*current_value=*/current_header,
 965              /*compare_value=*/monitor, /*exchange_value=*/displaced_header,
 966              /*where=*/object_mark_addr,
 967              MacroAssembler::MemBarRel,
 968              MacroAssembler::cmpxchgx_hint_release_lock(),
 969              noreg,
 970              &slow_case);
 971     b(free_slot);
 972 
 973     // } else {
 974     //   // Slow path.
 975     //   InterpreterRuntime::monitorexit(THREAD, monitor);
 976 
 977     // The lock has been converted into a heavy lock and hence
 978     // we need to get into the slow case.
 979     bind(slow_case);
 980     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
 981             monitor, check_for_exceptions CC_INTERP_ONLY(&& false));
 982     // }
 983 
 984     Label done;
 985     b(done); // Monitor register may be overwritten! Runtime has already freed the slot.
 986 
 987     // Exchange worked, do monitor->set_obj(NULL);
 988     align(32, 12);
 989     bind(free_slot);
 990     li(R0, 0);
 991     std(R0, BasicObjectLock::obj_offset_in_bytes(), monitor);
 992     bind(done);
 993   }
 994 }
 995 
 996 #ifndef CC_INTERP
 997 
 998 // Load compiled (i2c) or interpreter entry when calling from interpreted and
 999 // do the call. Centralized so that all interpreter calls will do the same actions.
1000 // If jvmti single stepping is on for a thread we must not call compiled code.
1001 //
1002 // Input:
1003 //   - Rtarget_method: method to call
1004 //   - Rret_addr:      return address
1005 //   - 2 scratch regs
1006 //
1007 void InterpreterMacroAssembler::call_from_interpreter(Register Rtarget_method, Register Rret_addr, Register Rscratch1, Register Rscratch2) {
1008   assert_different_registers(Rscratch1, Rscratch2, Rtarget_method, Rret_addr);
1009   // Assume we want to go compiled if available.
1010   const Register Rtarget_addr = Rscratch1;
1011   const Register Rinterp_only = Rscratch2;
1012 
1013   ld(Rtarget_addr, in_bytes(Method::from_interpreted_offset()), Rtarget_method);
1014 
1015   if (JvmtiExport::can_post_interpreter_events()) {
1016     lwz(Rinterp_only, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
1017 
1018     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
1019     // compiled code in threads for which the event is enabled. Check here for
1020     // interp_only_mode if these events CAN be enabled.
1021     Label done;
1022     verify_thread();
1023     cmpwi(CCR0, Rinterp_only, 0);
1024     beq(CCR0, done);
1025     ld(Rtarget_addr, in_bytes(Method::interpreter_entry_offset()), Rtarget_method);
1026     align(32, 12);
1027     bind(done);
1028   }
1029 
1030 #ifdef ASSERT
1031   {
1032     Label Lok;
1033     cmpdi(CCR0, Rtarget_addr, 0);
1034     bne(CCR0, Lok);
1035     stop("null entry point");
1036     bind(Lok);
1037   }
1038 #endif // ASSERT
1039 
1040   mr(R21_sender_SP, R1_SP);
1041 
1042   // Calc a precise SP for the call. The SP value we calculated in
1043   // generate_fixed_frame() is based on the max_stack() value, so we would waste stack space
1044   // if esp is not max. Also, the i2c adapter extends the stack space without restoring
1045   // our pre-calced value, so repeating calls via i2c would result in stack overflow.
1046   // Since esp already points to an empty slot, we just have to sub 1 additional slot
1047   // to meet the abi scratch requirements.
1048   // The max_stack pointer will get restored by means of the GR_Lmax_stack local in
1049   // the return entry of the interpreter.
1050   addi(Rscratch2, R15_esp, Interpreter::stackElementSize - frame::abi_reg_args_size);
1051   clrrdi(Rscratch2, Rscratch2, exact_log2(frame::alignment_in_bytes)); // round towards smaller address
1052   resize_frame_absolute(Rscratch2, Rscratch2, R0);
1053 
1054   mr_if_needed(R19_method, Rtarget_method);
1055   mtctr(Rtarget_addr);
1056   mtlr(Rret_addr);
1057 
1058   save_interpreter_state(Rscratch2);
1059 #ifdef ASSERT
1060   ld(Rscratch1, _ijava_state_neg(top_frame_sp), Rscratch2); // Rscratch2 contains fp
1061   cmpd(CCR0, R21_sender_SP, Rscratch1);
1062   asm_assert_eq("top_frame_sp incorrect", 0x951);
1063 #endif
1064 
1065   bctr();
1066 }
1067 
1068 // Set the method data pointer for the current bcp.
1069 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1070   assert(ProfileInterpreter, "must be profiling interpreter");
1071   Label get_continue;
1072   ld(R28_mdx, in_bytes(Method::method_data_offset()), R19_method);
1073   test_method_data_pointer(get_continue);
1074   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), R19_method, R14_bcp);
1075 
1076   addi(R28_mdx, R28_mdx, in_bytes(MethodData::data_offset()));
1077   add(R28_mdx, R28_mdx, R3_RET);
1078   bind(get_continue);
1079 }
1080 
1081 // Test ImethodDataPtr. If it is null, continue at the specified label.
1082 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
1083   assert(ProfileInterpreter, "must be profiling interpreter");
1084   cmpdi(CCR0, R28_mdx, 0);
1085   beq(CCR0, zero_continue);
1086 }
1087 
1088 void InterpreterMacroAssembler::verify_method_data_pointer() {
1089   assert(ProfileInterpreter, "must be profiling interpreter");
1090 #ifdef ASSERT
1091   Label verify_continue;
1092   test_method_data_pointer(verify_continue);
1093 
1094   // If the mdp is valid, it will point to a DataLayout header which is
1095   // consistent with the bcp. The converse is highly probable also.
1096   lhz(R11_scratch1, in_bytes(DataLayout::bci_offset()), R28_mdx);
1097   ld(R12_scratch2, in_bytes(Method::const_offset()), R19_method);
1098   addi(R11_scratch1, R11_scratch1, in_bytes(ConstMethod::codes_offset()));
1099   add(R11_scratch1, R12_scratch2, R12_scratch2);
1100   cmpd(CCR0, R11_scratch1, R14_bcp);
1101   beq(CCR0, verify_continue);
1102 
1103   call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp ), R19_method, R14_bcp, R28_mdx);
1104 
1105   bind(verify_continue);
1106 #endif
1107 }
1108 
1109 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
1110                                                                 Register Rscratch,
1111                                                                 Label &profile_continue) {
1112   assert(ProfileInterpreter, "must be profiling interpreter");
1113   // Control will flow to "profile_continue" if the counter is less than the
1114   // limit or if we call profile_method().
1115   Label done;
1116 
1117   // If no method data exists, and the counter is high enough, make one.
1118   int ipl_offs = load_const_optimized(Rscratch, &InvocationCounter::InterpreterProfileLimit, R0, true);
1119   lwz(Rscratch, ipl_offs, Rscratch);
1120 
1121   cmpdi(CCR0, R28_mdx, 0);
1122   // Test to see if we should create a method data oop.
1123   cmpd(CCR1, Rscratch /* InterpreterProfileLimit */, invocation_count);
1124   bne(CCR0, done);
1125   bge(CCR1, profile_continue);
1126 
1127   // Build it now.
1128   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1129   set_method_data_pointer_for_bcp();
1130   b(profile_continue);
1131 
1132   align(32, 12);
1133   bind(done);
1134 }
1135 
1136 void InterpreterMacroAssembler::test_backedge_count_for_osr(Register backedge_count, Register branch_bcp, Register Rtmp) {
1137   assert_different_registers(backedge_count, Rtmp, branch_bcp);
1138   assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
1139 
1140   Label did_not_overflow;
1141   Label overflow_with_error;
1142 
1143   int ibbl_offs = load_const_optimized(Rtmp, &InvocationCounter::InterpreterBackwardBranchLimit, R0, true);
1144   lwz(Rtmp, ibbl_offs, Rtmp);
1145   cmpw(CCR0, backedge_count, Rtmp);
1146 
1147   blt(CCR0, did_not_overflow);
1148 
1149   // When ProfileInterpreter is on, the backedge_count comes from the
1150   // methodDataOop, which value does not get reset on the call to
1151   // frequency_counter_overflow(). To avoid excessive calls to the overflow
1152   // routine while the method is being compiled, add a second test to make sure
1153   // the overflow function is called only once every overflow_frequency.
1154   if (ProfileInterpreter) {
1155     const int overflow_frequency = 1024;
1156     li(Rtmp, overflow_frequency-1);
1157     andr(Rtmp, Rtmp, backedge_count);
1158     cmpwi(CCR0, Rtmp, 0);
1159     bne(CCR0, did_not_overflow);
1160   }
1161 
1162   // Overflow in loop, pass branch bytecode.
1163   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, true);
1164 
1165   // Was an OSR adapter generated?
1166   // O0 = osr nmethod
1167   cmpdi(CCR0, R3_RET, 0);
1168   beq(CCR0, overflow_with_error);
1169 
1170   // Has the nmethod been invalidated already?
1171   lbz(Rtmp, nmethod::state_offset(), R3_RET);
1172   cmpwi(CCR0, Rtmp, nmethod::in_use);
1173   bne(CCR0, overflow_with_error);
1174 
1175   // Migrate the interpreter frame off of the stack.
1176   // We can use all registers because we will not return to interpreter from this point.
1177 
1178   // Save nmethod.
1179   const Register osr_nmethod = R31;
1180   mr(osr_nmethod, R3_RET);
1181   set_top_ijava_frame_at_SP_as_last_Java_frame(R1_SP, R11_scratch1);
1182   call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), R16_thread);
1183   reset_last_Java_frame();
1184   // OSR buffer is in ARG1
1185 
1186   // Remove the interpreter frame.
1187   merge_frames(/*top_frame_sp*/ R21_sender_SP, /*return_pc*/ R0, R11_scratch1, R12_scratch2);
1188 
1189   // Jump to the osr code.
1190   ld(R11_scratch1, nmethod::osr_entry_point_offset(), osr_nmethod);
1191   mtlr(R0);
1192   mtctr(R11_scratch1);
1193   bctr();
1194 
1195   align(32, 12);
1196   bind(overflow_with_error);
1197   bind(did_not_overflow);
1198 }
1199 
1200 // Store a value at some constant offset from the method data pointer.
1201 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
1202   assert(ProfileInterpreter, "must be profiling interpreter");
1203 
1204   std(value, constant, R28_mdx);
1205 }
1206 
1207 // Increment the value at some constant offset from the method data pointer.
1208 void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
1209                                                       Register counter_addr,
1210                                                       Register Rbumped_count,
1211                                                       bool decrement) {
1212   // Locate the counter at a fixed offset from the mdp:
1213   addi(counter_addr, R28_mdx, constant);
1214   increment_mdp_data_at(counter_addr, Rbumped_count, decrement);
1215 }
1216 
1217 // Increment the value at some non-fixed (reg + constant) offset from
1218 // the method data pointer.
1219 void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
1220                                                       int constant,
1221                                                       Register scratch,
1222                                                       Register Rbumped_count,
1223                                                       bool decrement) {
1224   // Add the constant to reg to get the offset.
1225   add(scratch, R28_mdx, reg);
1226   // Then calculate the counter address.
1227   addi(scratch, scratch, constant);
1228   increment_mdp_data_at(scratch, Rbumped_count, decrement);
1229 }
1230 
1231 void InterpreterMacroAssembler::increment_mdp_data_at(Register counter_addr,
1232                                                       Register Rbumped_count,
1233                                                       bool decrement) {
1234   assert(ProfileInterpreter, "must be profiling interpreter");
1235 
1236   // Load the counter.
1237   ld(Rbumped_count, 0, counter_addr);
1238 
1239   if (decrement) {
1240     // Decrement the register. Set condition codes.
1241     addi(Rbumped_count, Rbumped_count, - DataLayout::counter_increment);
1242     // Store the decremented counter, if it is still negative.
1243     std(Rbumped_count, 0, counter_addr);
1244     // Note: add/sub overflow check are not ported, since 64 bit
1245     // calculation should never overflow.
1246   } else {
1247     // Increment the register. Set carry flag.
1248     addi(Rbumped_count, Rbumped_count, DataLayout::counter_increment);
1249     // Store the incremented counter.
1250     std(Rbumped_count, 0, counter_addr);
1251   }
1252 }
1253 
1254 // Set a flag value at the current method data pointer position.
1255 void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
1256                                                 Register scratch) {
1257   assert(ProfileInterpreter, "must be profiling interpreter");
1258   // Load the data header.
1259   lbz(scratch, in_bytes(DataLayout::flags_offset()), R28_mdx);
1260   // Set the flag.
1261   ori(scratch, scratch, flag_constant);
1262   // Store the modified header.
1263   stb(scratch, in_bytes(DataLayout::flags_offset()), R28_mdx);
1264 }
1265 
1266 // Test the location at some offset from the method data pointer.
1267 // If it is not equal to value, branch to the not_equal_continue Label.
1268 void InterpreterMacroAssembler::test_mdp_data_at(int offset,
1269                                                  Register value,
1270                                                  Label& not_equal_continue,
1271                                                  Register test_out) {
1272   assert(ProfileInterpreter, "must be profiling interpreter");
1273 
1274   ld(test_out, offset, R28_mdx);
1275   cmpd(CCR0,  value, test_out);
1276   bne(CCR0, not_equal_continue);
1277 }
1278 
1279 // Update the method data pointer by the displacement located at some fixed
1280 // offset from the method data pointer.
1281 void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
1282                                                      Register scratch) {
1283   assert(ProfileInterpreter, "must be profiling interpreter");
1284 
1285   ld(scratch, offset_of_disp, R28_mdx);
1286   add(R28_mdx, scratch, R28_mdx);
1287 }
1288 
1289 // Update the method data pointer by the displacement located at the
1290 // offset (reg + offset_of_disp).
1291 void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
1292                                                      int offset_of_disp,
1293                                                      Register scratch) {
1294   assert(ProfileInterpreter, "must be profiling interpreter");
1295 
1296   add(scratch, reg, R28_mdx);
1297   ld(scratch, offset_of_disp, scratch);
1298   add(R28_mdx, scratch, R28_mdx);
1299 }
1300 
1301 // Update the method data pointer by a simple constant displacement.
1302 void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
1303   assert(ProfileInterpreter, "must be profiling interpreter");
1304   addi(R28_mdx, R28_mdx, constant);
1305 }
1306 
1307 // Update the method data pointer for a _ret bytecode whose target
1308 // was not among our cached targets.
1309 void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
1310                                                    Register return_bci) {
1311   assert(ProfileInterpreter, "must be profiling interpreter");
1312 
1313   push(state);
1314   assert(return_bci->is_nonvolatile(), "need to protect return_bci");
1315   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
1316   pop(state);
1317 }
1318 
1319 // Increments the backedge counter.
1320 // Returns backedge counter + invocation counter in Rdst.
1321 void InterpreterMacroAssembler::increment_backedge_counter(const Register Rcounters, const Register Rdst,
1322                                                            const Register Rtmp1, Register Rscratch) {
1323   assert(UseCompiler, "incrementing must be useful");
1324   assert_different_registers(Rdst, Rtmp1);
1325   const Register invocation_counter = Rtmp1;
1326   const Register counter = Rdst;
1327   // TODO ppc port assert(4 == InvocationCounter::sz_counter(), "unexpected field size.");
1328 
1329   // Load backedge counter.
1330   lwz(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
1331                in_bytes(InvocationCounter::counter_offset()), Rcounters);
1332   // Load invocation counter.
1333   lwz(invocation_counter, in_bytes(MethodCounters::invocation_counter_offset()) +
1334                           in_bytes(InvocationCounter::counter_offset()), Rcounters);
1335 
1336   // Add the delta to the backedge counter.
1337   addi(counter, counter, InvocationCounter::count_increment);
1338 
1339   // Mask the invocation counter.
1340   li(Rscratch, InvocationCounter::count_mask_value);
1341   andr(invocation_counter, invocation_counter, Rscratch);
1342 
1343   // Store new counter value.
1344   stw(counter, in_bytes(MethodCounters::backedge_counter_offset()) +
1345                in_bytes(InvocationCounter::counter_offset()), Rcounters);
1346   // Return invocation counter + backedge counter.
1347   add(counter, counter, invocation_counter);
1348 }
1349 
1350 // Count a taken branch in the bytecodes.
1351 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
1352   if (ProfileInterpreter) {
1353     Label profile_continue;
1354 
1355     // If no method data exists, go to profile_continue.
1356     test_method_data_pointer(profile_continue);
1357 
1358     // We are taking a branch. Increment the taken count.
1359     increment_mdp_data_at(in_bytes(JumpData::taken_offset()), scratch, bumped_count);
1360 
1361     // The method data pointer needs to be updated to reflect the new target.
1362     update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
1363     bind (profile_continue);
1364   }
1365 }
1366 
1367 // Count a not-taken branch in the bytecodes.
1368 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch1, Register scratch2) {
1369   if (ProfileInterpreter) {
1370     Label profile_continue;
1371 
1372     // If no method data exists, go to profile_continue.
1373     test_method_data_pointer(profile_continue);
1374 
1375     // We are taking a branch. Increment the not taken count.
1376     increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch1, scratch2);
1377 
1378     // The method data pointer needs to be updated to correspond to the
1379     // next bytecode.
1380     update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
1381     bind (profile_continue);
1382   }
1383 }
1384 
1385 // Count a non-virtual call in the bytecodes.
1386 void InterpreterMacroAssembler::profile_call(Register scratch1, Register scratch2) {
1387   if (ProfileInterpreter) {
1388     Label profile_continue;
1389 
1390     // If no method data exists, go to profile_continue.
1391     test_method_data_pointer(profile_continue);
1392 
1393     // We are making a call. Increment the count.
1394     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1395 
1396     // The method data pointer needs to be updated to reflect the new target.
1397     update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
1398     bind (profile_continue);
1399   }
1400 }
1401 
1402 // Count a final call in the bytecodes.
1403 void InterpreterMacroAssembler::profile_final_call(Register scratch1, Register scratch2) {
1404   if (ProfileInterpreter) {
1405     Label profile_continue;
1406 
1407     // If no method data exists, go to profile_continue.
1408     test_method_data_pointer(profile_continue);
1409 
1410     // We are making a call. Increment the count.
1411     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1412 
1413     // The method data pointer needs to be updated to reflect the new target.
1414     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1415     bind (profile_continue);
1416   }
1417 }
1418 
1419 // Count a virtual call in the bytecodes.
1420 void InterpreterMacroAssembler::profile_virtual_call(Register Rreceiver,
1421                                                      Register Rscratch1,
1422                                                      Register Rscratch2,
1423                                                      bool receiver_can_be_null) {
1424   if (!ProfileInterpreter) { return; }
1425   Label profile_continue;
1426 
1427   // If no method data exists, go to profile_continue.
1428   test_method_data_pointer(profile_continue);
1429 
1430   Label skip_receiver_profile;
1431   if (receiver_can_be_null) {
1432     Label not_null;
1433     cmpdi(CCR0, Rreceiver, 0);
1434     bne(CCR0, not_null);
1435     // We are making a call. Increment the count for null receiver.
1436     increment_mdp_data_at(in_bytes(CounterData::count_offset()), Rscratch1, Rscratch2);
1437     b(skip_receiver_profile);
1438     bind(not_null);
1439   }
1440 
1441   // Record the receiver type.
1442   record_klass_in_profile(Rreceiver, Rscratch1, Rscratch2, true);
1443   bind(skip_receiver_profile);
1444 
1445   // The method data pointer needs to be updated to reflect the new target.
1446   update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1447   bind (profile_continue);
1448 }
1449 
1450 void InterpreterMacroAssembler::profile_typecheck(Register Rklass, Register Rscratch1, Register Rscratch2) {
1451   if (ProfileInterpreter) {
1452     Label profile_continue;
1453 
1454     // If no method data exists, go to profile_continue.
1455     test_method_data_pointer(profile_continue);
1456 
1457     int mdp_delta = in_bytes(BitData::bit_data_size());
1458     if (TypeProfileCasts) {
1459       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1460 
1461       // Record the object type.
1462       record_klass_in_profile(Rklass, Rscratch1, Rscratch2, false);
1463     }
1464 
1465     // The method data pointer needs to be updated.
1466     update_mdp_by_constant(mdp_delta);
1467 
1468     bind (profile_continue);
1469   }
1470 }
1471 
1472 void InterpreterMacroAssembler::profile_typecheck_failed(Register Rscratch1, Register Rscratch2) {
1473   if (ProfileInterpreter && TypeProfileCasts) {
1474     Label profile_continue;
1475 
1476     // If no method data exists, go to profile_continue.
1477     test_method_data_pointer(profile_continue);
1478 
1479     int count_offset = in_bytes(CounterData::count_offset());
1480     // Back up the address, since we have already bumped the mdp.
1481     count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
1482 
1483     // *Decrement* the counter. We expect to see zero or small negatives.
1484     increment_mdp_data_at(count_offset, Rscratch1, Rscratch2, true);
1485 
1486     bind (profile_continue);
1487   }
1488 }
1489 
1490 // Count a ret in the bytecodes.
1491 void InterpreterMacroAssembler::profile_ret(TosState state, Register return_bci, Register scratch1, Register scratch2) {
1492   if (ProfileInterpreter) {
1493     Label profile_continue;
1494     uint row;
1495 
1496     // If no method data exists, go to profile_continue.
1497     test_method_data_pointer(profile_continue);
1498 
1499     // Update the total ret count.
1500     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2 );
1501 
1502     for (row = 0; row < RetData::row_limit(); row++) {
1503       Label next_test;
1504 
1505       // See if return_bci is equal to bci[n]:
1506       test_mdp_data_at(in_bytes(RetData::bci_offset(row)), return_bci, next_test, scratch1);
1507 
1508       // return_bci is equal to bci[n]. Increment the count.
1509       increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch1, scratch2);
1510 
1511       // The method data pointer needs to be updated to reflect the new target.
1512       update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch1);
1513       b(profile_continue);
1514       bind(next_test);
1515     }
1516 
1517     update_mdp_for_ret(state, return_bci);
1518 
1519     bind (profile_continue);
1520   }
1521 }
1522 
1523 // Count the default case of a switch construct.
1524 void InterpreterMacroAssembler::profile_switch_default(Register scratch1,  Register scratch2) {
1525   if (ProfileInterpreter) {
1526     Label profile_continue;
1527 
1528     // If no method data exists, go to profile_continue.
1529     test_method_data_pointer(profile_continue);
1530 
1531     // Update the default case count
1532     increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
1533                           scratch1, scratch2);
1534 
1535     // The method data pointer needs to be updated.
1536     update_mdp_by_offset(in_bytes(MultiBranchData::default_displacement_offset()),
1537                          scratch1);
1538 
1539     bind (profile_continue);
1540   }
1541 }
1542 
1543 // Count the index'th case of a switch construct.
1544 void InterpreterMacroAssembler::profile_switch_case(Register index,
1545                                                     Register scratch1,
1546                                                     Register scratch2,
1547                                                     Register scratch3) {
1548   if (ProfileInterpreter) {
1549     assert_different_registers(index, scratch1, scratch2, scratch3);
1550     Label profile_continue;
1551 
1552     // If no method data exists, go to profile_continue.
1553     test_method_data_pointer(profile_continue);
1554 
1555     // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes().
1556     li(scratch3, in_bytes(MultiBranchData::case_array_offset()));
1557 
1558     assert (in_bytes(MultiBranchData::per_case_size()) == 16, "so that shladd works");
1559     sldi(scratch1, index, exact_log2(in_bytes(MultiBranchData::per_case_size())));
1560     add(scratch1, scratch1, scratch3);
1561 
1562     // Update the case count.
1563     increment_mdp_data_at(scratch1, in_bytes(MultiBranchData::relative_count_offset()), scratch2, scratch3);
1564 
1565     // The method data pointer needs to be updated.
1566     update_mdp_by_offset(scratch1, in_bytes(MultiBranchData::relative_displacement_offset()), scratch2);
1567 
1568     bind (profile_continue);
1569   }
1570 }
1571 
1572 void InterpreterMacroAssembler::profile_null_seen(Register Rscratch1, Register Rscratch2) {
1573   if (ProfileInterpreter) {
1574     assert_different_registers(Rscratch1, Rscratch2);
1575     Label profile_continue;
1576 
1577     // If no method data exists, go to profile_continue.
1578     test_method_data_pointer(profile_continue);
1579 
1580     set_mdp_flag_at(BitData::null_seen_byte_constant(), Rscratch1);
1581 
1582     // The method data pointer needs to be updated.
1583     int mdp_delta = in_bytes(BitData::bit_data_size());
1584     if (TypeProfileCasts) {
1585       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1586     }
1587     update_mdp_by_constant(mdp_delta);
1588 
1589     bind (profile_continue);
1590   }
1591 }
1592 
1593 void InterpreterMacroAssembler::record_klass_in_profile(Register Rreceiver,
1594                                                         Register Rscratch1, Register Rscratch2,
1595                                                         bool is_virtual_call) {
1596   assert(ProfileInterpreter, "must be profiling");
1597   assert_different_registers(Rreceiver, Rscratch1, Rscratch2);
1598 
1599   Label done;
1600   record_klass_in_profile_helper(Rreceiver, Rscratch1, Rscratch2, 0, done, is_virtual_call);
1601   bind (done);
1602 }
1603 
1604 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1605                                         Register receiver, Register scratch1, Register scratch2,
1606                                         int start_row, Label& done, bool is_virtual_call) {
1607   if (TypeProfileWidth == 0) {
1608     if (is_virtual_call) {
1609       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1610     }
1611     return;
1612   }
1613 
1614   int last_row = VirtualCallData::row_limit() - 1;
1615   assert(start_row <= last_row, "must be work left to do");
1616   // Test this row for both the receiver and for null.
1617   // Take any of three different outcomes:
1618   //   1. found receiver => increment count and goto done
1619   //   2. found null => keep looking for case 1, maybe allocate this cell
1620   //   3. found something else => keep looking for cases 1 and 2
1621   // Case 3 is handled by a recursive call.
1622   for (int row = start_row; row <= last_row; row++) {
1623     Label next_test;
1624     bool test_for_null_also = (row == start_row);
1625 
1626     // See if the receiver is receiver[n].
1627     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1628     test_mdp_data_at(recvr_offset, receiver, next_test, scratch1);
1629     // delayed()->tst(scratch);
1630 
1631     // The receiver is receiver[n]. Increment count[n].
1632     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1633     increment_mdp_data_at(count_offset, scratch1, scratch2);
1634     b(done);
1635     bind(next_test);
1636 
1637     if (test_for_null_also) {
1638       Label found_null;
1639       // Failed the equality check on receiver[n]... Test for null.
1640       if (start_row == last_row) {
1641         // The only thing left to do is handle the null case.
1642         if (is_virtual_call) {
1643           // Scratch1 contains test_out from test_mdp_data_at.
1644           cmpdi(CCR0, scratch1, 0);
1645           beq(CCR0, found_null);
1646           // Receiver did not match any saved receiver and there is no empty row for it.
1647           // Increment total counter to indicate polymorphic case.
1648           increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch1, scratch2);
1649           b(done);
1650           bind(found_null);
1651         } else {
1652           cmpdi(CCR0, scratch1, 0);
1653           bne(CCR0, done);
1654         }
1655         break;
1656       }
1657       // Since null is rare, make it be the branch-taken case.
1658       cmpdi(CCR0, scratch1, 0);
1659       beq(CCR0, found_null);
1660 
1661       // Put all the "Case 3" tests here.
1662       record_klass_in_profile_helper(receiver, scratch1, scratch2, start_row + 1, done, is_virtual_call);
1663 
1664       // Found a null. Keep searching for a matching receiver,
1665       // but remember that this is an empty (unused) slot.
1666       bind(found_null);
1667     }
1668   }
1669 
1670   // In the fall-through case, we found no matching receiver, but we
1671   // observed the receiver[start_row] is NULL.
1672 
1673   // Fill in the receiver field and increment the count.
1674   int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1675   set_mdp_data_at(recvr_offset, receiver);
1676   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1677   li(scratch1, DataLayout::counter_increment);
1678   set_mdp_data_at(count_offset, scratch1);
1679   if (start_row > 0) {
1680     b(done);
1681   }
1682 }
1683 
1684 // Argument and return type profilig.
1685 // kills: tmp, tmp2, R0, CR0, CR1
1686 void InterpreterMacroAssembler::profile_obj_type(Register obj, Register mdo_addr_base,
1687                                                  RegisterOrConstant mdo_addr_offs, Register tmp, Register tmp2) {
1688   Label do_nothing, do_update;
1689 
1690   // tmp2 = obj is allowed
1691   assert_different_registers(obj, mdo_addr_base, tmp, R0);
1692   assert_different_registers(tmp2, mdo_addr_base, tmp, R0);
1693   const Register klass = tmp2;
1694 
1695   verify_oop(obj);
1696 
1697   ld(tmp, mdo_addr_offs, mdo_addr_base);
1698 
1699   // Set null_seen if obj is 0.
1700   cmpdi(CCR0, obj, 0);
1701   ori(R0, tmp, TypeEntries::null_seen);
1702   beq(CCR0, do_update);
1703 
1704   load_klass(klass, obj);
1705 
1706   clrrdi(R0, tmp, exact_log2(-TypeEntries::type_klass_mask));
1707   // Basically same as andi(R0, tmp, TypeEntries::type_klass_mask);
1708   cmpd(CCR1, R0, klass);
1709   // Klass seen before, nothing to do (regardless of unknown bit).
1710   //beq(CCR1, do_nothing);
1711 
1712   andi_(R0, klass, TypeEntries::type_unknown);
1713   // Already unknown. Nothing to do anymore.
1714   //bne(CCR0, do_nothing);
1715   crorc(CCR0, Assembler::equal, CCR1, Assembler::equal); // cr0 eq = cr1 eq or cr0 ne
1716   beq(CCR0, do_nothing);
1717 
1718   clrrdi_(R0, tmp, exact_log2(-TypeEntries::type_mask));
1719   orr(R0, klass, tmp); // Combine klass and null_seen bit (only used if (tmp & type_mask)==0).
1720   beq(CCR0, do_update); // First time here. Set profile type.
1721 
1722   // Different than before. Cannot keep accurate profile.
1723   ori(R0, tmp, TypeEntries::type_unknown);
1724 
1725   bind(do_update);
1726   // update profile
1727   std(R0, mdo_addr_offs, mdo_addr_base);
1728 
1729   align(32, 12);
1730   bind(do_nothing);
1731 }
1732 
1733 void InterpreterMacroAssembler::profile_arguments_type(Register callee, Register tmp1, Register tmp2, bool is_virtual) {
1734   if (!ProfileInterpreter) {
1735     return;
1736   }
1737 
1738   assert_different_registers(callee, tmp1, tmp2, R28_mdx);
1739 
1740   if (MethodData::profile_arguments() || MethodData::profile_return()) {
1741     Label profile_continue;
1742 
1743     test_method_data_pointer(profile_continue);
1744 
1745     int off_to_start = is_virtual ? in_bytes(VirtualCallData::virtual_call_data_size()) : in_bytes(CounterData::counter_data_size());
1746 
1747     lbz(tmp1, in_bytes(DataLayout::tag_offset()) - off_to_start, R28_mdx);
1748     cmpwi(CCR0, tmp1, is_virtual ? DataLayout::virtual_call_type_data_tag : DataLayout::call_type_data_tag);
1749     bne(CCR0, profile_continue);
1750 
1751     if (MethodData::profile_arguments()) {
1752       Label done;
1753       int off_to_args = in_bytes(TypeEntriesAtCall::args_data_offset());
1754       add(R28_mdx, off_to_args, R28_mdx);
1755 
1756       for (int i = 0; i < TypeProfileArgsLimit; i++) {
1757         if (i > 0 || MethodData::profile_return()) {
1758           // If return value type is profiled we may have no argument to profile.
1759           ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
1760           cmpdi(CCR0, tmp1, (i+1)*TypeStackSlotEntries::per_arg_count());
1761           addi(tmp1, tmp1, -i*TypeStackSlotEntries::per_arg_count());
1762           blt(CCR0, done);
1763         }
1764         ld(tmp1, in_bytes(Method::const_offset()), callee);
1765         lhz(tmp1, in_bytes(ConstMethod::size_of_parameters_offset()), tmp1);
1766         // Stack offset o (zero based) from the start of the argument
1767         // list, for n arguments translates into offset n - o - 1 from
1768         // the end of the argument list. But there's an extra slot at
1769         // the top of the stack. So the offset is n - o from Lesp.
1770         ld(tmp2, in_bytes(TypeEntriesAtCall::stack_slot_offset(i))-off_to_args, R28_mdx);
1771         subf(tmp1, tmp2, tmp1);
1772 
1773         sldi(tmp1, tmp1, Interpreter::logStackElementSize);
1774         ldx(tmp1, tmp1, R15_esp);
1775 
1776         profile_obj_type(tmp1, R28_mdx, in_bytes(TypeEntriesAtCall::argument_type_offset(i))-off_to_args, tmp2, tmp1);
1777 
1778         int to_add = in_bytes(TypeStackSlotEntries::per_arg_size());
1779         addi(R28_mdx, R28_mdx, to_add);
1780         off_to_args += to_add;
1781       }
1782 
1783       if (MethodData::profile_return()) {
1784         ld(tmp1, in_bytes(TypeEntriesAtCall::cell_count_offset())-off_to_args, R28_mdx);
1785         addi(tmp1, tmp1, -TypeProfileArgsLimit*TypeStackSlotEntries::per_arg_count());
1786       }
1787 
1788       bind(done);
1789 
1790       if (MethodData::profile_return()) {
1791         // We're right after the type profile for the last
1792         // argument. tmp1 is the number of cells left in the
1793         // CallTypeData/VirtualCallTypeData to reach its end. Non null
1794         // if there's a return to profile.
1795         assert(ReturnTypeEntry::static_cell_count() < TypeStackSlotEntries::per_arg_count(), "can't move past ret type");
1796         sldi(tmp1, tmp1, exact_log2(DataLayout::cell_size));
1797         add(R28_mdx, tmp1, R28_mdx);
1798       }
1799     } else {
1800       assert(MethodData::profile_return(), "either profile call args or call ret");
1801       update_mdp_by_constant(in_bytes(TypeEntriesAtCall::return_only_size()));
1802     }
1803 
1804     // Mdp points right after the end of the
1805     // CallTypeData/VirtualCallTypeData, right after the cells for the
1806     // return value type if there's one.
1807     align(32, 12);
1808     bind(profile_continue);
1809   }
1810 }
1811 
1812 void InterpreterMacroAssembler::profile_return_type(Register ret, Register tmp1, Register tmp2) {
1813   assert_different_registers(ret, tmp1, tmp2);
1814   if (ProfileInterpreter && MethodData::profile_return()) {
1815     Label profile_continue;
1816 
1817     test_method_data_pointer(profile_continue);
1818 
1819     if (MethodData::profile_return_jsr292_only()) {
1820       // If we don't profile all invoke bytecodes we must make sure
1821       // it's a bytecode we indeed profile. We can't go back to the
1822       // begining of the ProfileData we intend to update to check its
1823       // type because we're right after it and we don't known its
1824       // length.
1825       lbz(tmp1, 0, R14_bcp);
1826       lbz(tmp2, Method::intrinsic_id_offset_in_bytes(), R19_method);
1827       cmpwi(CCR0, tmp1, Bytecodes::_invokedynamic);
1828       cmpwi(CCR1, tmp1, Bytecodes::_invokehandle);
1829       cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
1830       cmpwi(CCR1, tmp2, vmIntrinsics::_compiledLambdaForm);
1831       cror(CCR0, Assembler::equal, CCR1, Assembler::equal);
1832       bne(CCR0, profile_continue);
1833     }
1834 
1835     profile_obj_type(ret, R28_mdx, -in_bytes(ReturnTypeEntry::size()), tmp1, tmp2);
1836 
1837     align(32, 12);
1838     bind(profile_continue);
1839   }
1840 }
1841 
1842 void InterpreterMacroAssembler::profile_parameters_type(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
1843   if (ProfileInterpreter && MethodData::profile_parameters()) {
1844     Label profile_continue, done;
1845 
1846     test_method_data_pointer(profile_continue);
1847 
1848     // Load the offset of the area within the MDO used for
1849     // parameters. If it's negative we're not profiling any parameters.
1850     lwz(tmp1, in_bytes(MethodData::parameters_type_data_di_offset()) - in_bytes(MethodData::data_offset()), R28_mdx);
1851     cmpwi(CCR0, tmp1, 0);
1852     blt(CCR0, profile_continue);
1853 
1854     // Compute a pointer to the area for parameters from the offset
1855     // and move the pointer to the slot for the last
1856     // parameters. Collect profiling from last parameter down.
1857     // mdo start + parameters offset + array length - 1
1858 
1859     // Pointer to the parameter area in the MDO.
1860     const Register mdp = tmp1;
1861     add(mdp, tmp1, R28_mdx);
1862 
1863     // Offset of the current profile entry to update.
1864     const Register entry_offset = tmp2;
1865     // entry_offset = array len in number of cells
1866     ld(entry_offset, in_bytes(ArrayData::array_len_offset()), mdp);
1867 
1868     int off_base = in_bytes(ParametersTypeData::stack_slot_offset(0));
1869     assert(off_base % DataLayout::cell_size == 0, "should be a number of cells");
1870 
1871     // entry_offset (number of cells)  = array len - size of 1 entry + offset of the stack slot field
1872     addi(entry_offset, entry_offset, -TypeStackSlotEntries::per_arg_count() + (off_base / DataLayout::cell_size));
1873     // entry_offset in bytes
1874     sldi(entry_offset, entry_offset, exact_log2(DataLayout::cell_size));
1875 
1876     Label loop;
1877     align(32, 12);
1878     bind(loop);
1879 
1880     // Load offset on the stack from the slot for this parameter.
1881     ld(tmp3, entry_offset, mdp);
1882     sldi(tmp3, tmp3, Interpreter::logStackElementSize);
1883     neg(tmp3, tmp3);
1884     // Read the parameter from the local area.
1885     ldx(tmp3, tmp3, R18_locals);
1886 
1887     // Make entry_offset now point to the type field for this parameter.
1888     int type_base = in_bytes(ParametersTypeData::type_offset(0));
1889     assert(type_base > off_base, "unexpected");
1890     addi(entry_offset, entry_offset, type_base - off_base);
1891 
1892     // Profile the parameter.
1893     profile_obj_type(tmp3, mdp, entry_offset, tmp4, tmp3);
1894 
1895     // Go to next parameter.
1896     int delta = TypeStackSlotEntries::per_arg_count() * DataLayout::cell_size + (type_base - off_base);
1897     cmpdi(CCR0, entry_offset, off_base + delta);
1898     addi(entry_offset, entry_offset, -delta);
1899     bge(CCR0, loop);
1900 
1901     align(32, 12);
1902     bind(profile_continue);
1903   }
1904 }
1905 
1906 // Add a InterpMonitorElem to stack (see frame_sparc.hpp).
1907 void InterpreterMacroAssembler::add_monitor_to_stack(bool stack_is_empty, Register Rtemp1, Register Rtemp2) {
1908 
1909   // Very-local scratch registers.
1910   const Register esp  = Rtemp1;
1911   const Register slot = Rtemp2;
1912 
1913   // Extracted monitor_size.
1914   int monitor_size = frame::interpreter_frame_monitor_size_in_bytes();
1915   assert(Assembler::is_aligned((unsigned int)monitor_size,
1916                                (unsigned int)frame::alignment_in_bytes),
1917          "size of a monitor must respect alignment of SP");
1918 
1919   resize_frame(-monitor_size, /*temp*/esp); // Allocate space for new monitor
1920   std(R1_SP, _ijava_state_neg(top_frame_sp), esp); // esp contains fp
1921 
1922   // Shuffle expression stack down. Recall that stack_base points
1923   // just above the new expression stack bottom. Old_tos and new_tos
1924   // are used to scan thru the old and new expression stacks.
1925   if (!stack_is_empty) {
1926     Label copy_slot, copy_slot_finished;
1927     const Register n_slots = slot;
1928 
1929     addi(esp, R15_esp, Interpreter::stackElementSize); // Point to first element (pre-pushed stack).
1930     subf(n_slots, esp, R26_monitor);
1931     srdi_(n_slots, n_slots, LogBytesPerWord);          // Compute number of slots to copy.
1932     assert(LogBytesPerWord == 3, "conflicts assembler instructions");
1933     beq(CCR0, copy_slot_finished);                     // Nothing to copy.
1934 
1935     mtctr(n_slots);
1936 
1937     // loop
1938     bind(copy_slot);
1939     ld(slot, 0, esp);              // Move expression stack down.
1940     std(slot, -monitor_size, esp); // distance = monitor_size
1941     addi(esp, esp, BytesPerWord);
1942     bdnz(copy_slot);
1943 
1944     bind(copy_slot_finished);
1945   }
1946 
1947   addi(R15_esp, R15_esp, -monitor_size);
1948   addi(R26_monitor, R26_monitor, -monitor_size);
1949 
1950   // Restart interpreter
1951 }
1952 
1953 // ============================================================================
1954 // Java locals access
1955 
1956 // Load a local variable at index in Rindex into register Rdst_value.
1957 // Also puts address of local into Rdst_address as a service.
1958 // Kills:
1959 //   - Rdst_value
1960 //   - Rdst_address
1961 void InterpreterMacroAssembler::load_local_int(Register Rdst_value, Register Rdst_address, Register Rindex) {
1962   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
1963   subf(Rdst_address, Rdst_address, R18_locals);
1964   lwz(Rdst_value, 0, Rdst_address);
1965 }
1966 
1967 // Load a local variable at index in Rindex into register Rdst_value.
1968 // Also puts address of local into Rdst_address as a service.
1969 // Kills:
1970 //   - Rdst_value
1971 //   - Rdst_address
1972 void InterpreterMacroAssembler::load_local_long(Register Rdst_value, Register Rdst_address, Register Rindex) {
1973   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
1974   subf(Rdst_address, Rdst_address, R18_locals);
1975   ld(Rdst_value, -8, Rdst_address);
1976 }
1977 
1978 // Load a local variable at index in Rindex into register Rdst_value.
1979 // Also puts address of local into Rdst_address as a service.
1980 // Input:
1981 //   - Rindex:      slot nr of local variable
1982 // Kills:
1983 //   - Rdst_value
1984 //   - Rdst_address
1985 void InterpreterMacroAssembler::load_local_ptr(Register Rdst_value, Register Rdst_address, Register Rindex) {
1986   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
1987   subf(Rdst_address, Rdst_address, R18_locals);
1988   ld(Rdst_value, 0, Rdst_address);
1989 }
1990 
1991 // Load a local variable at index in Rindex into register Rdst_value.
1992 // Also puts address of local into Rdst_address as a service.
1993 // Kills:
1994 //   - Rdst_value
1995 //   - Rdst_address
1996 void InterpreterMacroAssembler::load_local_float(FloatRegister Rdst_value, Register Rdst_address, Register Rindex) {
1997   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
1998   subf(Rdst_address, Rdst_address, R18_locals);
1999   lfs(Rdst_value, 0, Rdst_address);
2000 }
2001 
2002 // Load a local variable at index in Rindex into register Rdst_value.
2003 // Also puts address of local into Rdst_address as a service.
2004 // Kills:
2005 //   - Rdst_value
2006 //   - Rdst_address
2007 void InterpreterMacroAssembler::load_local_double(FloatRegister Rdst_value, Register Rdst_address, Register Rindex) {
2008   sldi(Rdst_address, Rindex, Interpreter::logStackElementSize);
2009   subf(Rdst_address, Rdst_address, R18_locals);
2010   lfd(Rdst_value, -8, Rdst_address);
2011 }
2012 
2013 // Store an int value at local variable slot Rindex.
2014 // Kills:
2015 //   - Rindex
2016 void InterpreterMacroAssembler::store_local_int(Register Rvalue, Register Rindex) {
2017   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2018   subf(Rindex, Rindex, R18_locals);
2019   stw(Rvalue, 0, Rindex);
2020 }
2021 
2022 // Store a long value at local variable slot Rindex.
2023 // Kills:
2024 //   - Rindex
2025 void InterpreterMacroAssembler::store_local_long(Register Rvalue, Register Rindex) {
2026   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2027   subf(Rindex, Rindex, R18_locals);
2028   std(Rvalue, -8, Rindex);
2029 }
2030 
2031 // Store an oop value at local variable slot Rindex.
2032 // Kills:
2033 //   - Rindex
2034 void InterpreterMacroAssembler::store_local_ptr(Register Rvalue, Register Rindex) {
2035   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2036   subf(Rindex, Rindex, R18_locals);
2037   std(Rvalue, 0, Rindex);
2038 }
2039 
2040 // Store an int value at local variable slot Rindex.
2041 // Kills:
2042 //   - Rindex
2043 void InterpreterMacroAssembler::store_local_float(FloatRegister Rvalue, Register Rindex) {
2044   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2045   subf(Rindex, Rindex, R18_locals);
2046   stfs(Rvalue, 0, Rindex);
2047 }
2048 
2049 // Store an int value at local variable slot Rindex.
2050 // Kills:
2051 //   - Rindex
2052 void InterpreterMacroAssembler::store_local_double(FloatRegister Rvalue, Register Rindex) {
2053   sldi(Rindex, Rindex, Interpreter::logStackElementSize);
2054   subf(Rindex, Rindex, R18_locals);
2055   stfd(Rvalue, -8, Rindex);
2056 }
2057 
2058 // Read pending exception from thread and jump to interpreter.
2059 // Throw exception entry if one if pending. Fall through otherwise.
2060 void InterpreterMacroAssembler::check_and_forward_exception(Register Rscratch1, Register Rscratch2) {
2061   assert_different_registers(Rscratch1, Rscratch2, R3);
2062   Register Rexception = Rscratch1;
2063   Register Rtmp       = Rscratch2;
2064   Label Ldone;
2065   // Get pending exception oop.
2066   ld(Rexception, thread_(pending_exception));
2067   cmpdi(CCR0, Rexception, 0);
2068   beq(CCR0, Ldone);
2069   li(Rtmp, 0);
2070   mr_if_needed(R3, Rexception);
2071   std(Rtmp, thread_(pending_exception)); // Clear exception in thread
2072   if (Interpreter::rethrow_exception_entry() != NULL) {
2073     // Already got entry address.
2074     load_dispatch_table(Rtmp, (address*)Interpreter::rethrow_exception_entry());
2075   } else {
2076     // Dynamically load entry address.
2077     int simm16_rest = load_const_optimized(Rtmp, &Interpreter::_rethrow_exception_entry, R0, true);
2078     ld(Rtmp, simm16_rest, Rtmp);
2079   }
2080   mtctr(Rtmp);
2081   save_interpreter_state(Rtmp);
2082   bctr();
2083 
2084   align(32, 12);
2085   bind(Ldone);
2086 }
2087 
2088 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
2089   save_interpreter_state(R11_scratch1);
2090 
2091   MacroAssembler::call_VM(oop_result, entry_point, false);
2092 
2093   restore_interpreter_state(R11_scratch1, /*bcp_and_mdx_only*/ true);
2094 
2095   check_and_handle_popframe(R11_scratch1);
2096   check_and_handle_earlyret(R11_scratch1);
2097   // Now check exceptions manually.
2098   if (check_exceptions) {
2099     check_and_forward_exception(R11_scratch1, R12_scratch2);
2100   }
2101 }
2102 
2103 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
2104   // ARG1 is reserved for the thread.
2105   mr_if_needed(R4_ARG2, arg_1);
2106   call_VM(oop_result, entry_point, check_exceptions);
2107 }
2108 
2109 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
2110   // ARG1 is reserved for the thread.
2111   mr_if_needed(R4_ARG2, arg_1);
2112   assert(arg_2 != R4_ARG2, "smashed argument");
2113   mr_if_needed(R5_ARG3, arg_2);
2114   call_VM(oop_result, entry_point, check_exceptions);
2115 }
2116 
2117 void InterpreterMacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
2118   // ARG1 is reserved for the thread.
2119   mr_if_needed(R4_ARG2, arg_1);
2120   assert(arg_2 != R4_ARG2, "smashed argument");
2121   mr_if_needed(R5_ARG3, arg_2);
2122   assert(arg_3 != R4_ARG2 && arg_3 != R5_ARG3, "smashed argument");
2123   mr_if_needed(R6_ARG4, arg_3);
2124   call_VM(oop_result, entry_point, check_exceptions);
2125 }
2126 
2127 void InterpreterMacroAssembler::save_interpreter_state(Register scratch) {
2128   ld(scratch, 0, R1_SP);
2129   std(R15_esp, _ijava_state_neg(esp), scratch);
2130   std(R14_bcp, _ijava_state_neg(bcp), scratch);
2131   std(R26_monitor, _ijava_state_neg(monitors), scratch);
2132   if (ProfileInterpreter) { std(R28_mdx, _ijava_state_neg(mdx), scratch); }
2133   // Other entries should be unchanged.
2134 }
2135 
2136 void InterpreterMacroAssembler::restore_interpreter_state(Register scratch, bool bcp_and_mdx_only) {
2137   ld(scratch, 0, R1_SP);
2138   ld(R14_bcp, _ijava_state_neg(bcp), scratch); // Changed by VM code (exception).
2139   if (ProfileInterpreter) { ld(R28_mdx, _ijava_state_neg(mdx), scratch); } // Changed by VM code.
2140   if (!bcp_and_mdx_only) {
2141     // Following ones are Metadata.
2142     ld(R19_method, _ijava_state_neg(method), scratch);
2143     ld(R27_constPoolCache, _ijava_state_neg(cpoolCache), scratch);
2144     // Following ones are stack addresses and don't require reload.
2145     ld(R15_esp, _ijava_state_neg(esp), scratch);
2146     ld(R18_locals, _ijava_state_neg(locals), scratch);
2147     ld(R26_monitor, _ijava_state_neg(monitors), scratch);
2148   }
2149 #ifdef ASSERT
2150   {
2151     Label Lok;
2152     subf(R0, R1_SP, scratch);
2153     cmpdi(CCR0, R0, frame::abi_reg_args_size + frame::ijava_state_size);
2154     bge(CCR0, Lok);
2155     stop("frame too small (restore istate)", 0x5432);
2156     bind(Lok);
2157   }
2158   {
2159     Label Lok;
2160     ld(R0, _ijava_state_neg(ijava_reserved), scratch);
2161     cmpdi(CCR0, R0, 0x5afe);
2162     beq(CCR0, Lok);
2163     stop("frame corrupted (restore istate)", 0x5afe);
2164     bind(Lok);
2165   }
2166 #endif
2167 }
2168 
2169 #endif // !CC_INTERP
2170 
2171 void InterpreterMacroAssembler::get_method_counters(Register method,
2172                                                     Register Rcounters,
2173                                                     Label& skip) {
2174   BLOCK_COMMENT("Load and ev. allocate counter object {");
2175   Label has_counters;
2176   ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
2177   cmpdi(CCR0, Rcounters, 0);
2178   bne(CCR0, has_counters);
2179   call_VM(noreg, CAST_FROM_FN_PTR(address,
2180                                   InterpreterRuntime::build_method_counters), method, false);
2181   ld(Rcounters, in_bytes(Method::method_counters_offset()), method);
2182   cmpdi(CCR0, Rcounters, 0);
2183   beq(CCR0, skip); // No MethodCounters, OutOfMemory.
2184   BLOCK_COMMENT("} Load and ev. allocate counter object");
2185 
2186   bind(has_counters);
2187 }
2188 
2189 void InterpreterMacroAssembler::increment_invocation_counter(Register Rcounters, Register iv_be_count, Register Rtmp_r0) {
2190   assert(UseCompiler || LogTouchedMethods, "incrementing must be useful");
2191   Register invocation_count = iv_be_count;
2192   Register backedge_count   = Rtmp_r0;
2193   int delta = InvocationCounter::count_increment;
2194 
2195   // Load each counter in a register.
2196   //  ld(inv_counter, Rtmp);
2197   //  ld(be_counter, Rtmp2);
2198   int inv_counter_offset = in_bytes(MethodCounters::invocation_counter_offset() +
2199                                     InvocationCounter::counter_offset());
2200   int be_counter_offset  = in_bytes(MethodCounters::backedge_counter_offset() +
2201                                     InvocationCounter::counter_offset());
2202 
2203   BLOCK_COMMENT("Increment profiling counters {");
2204 
2205   // Load the backedge counter.
2206   lwz(backedge_count, be_counter_offset, Rcounters); // is unsigned int
2207   // Mask the backedge counter.
2208   Register tmp = invocation_count;
2209   li(tmp, InvocationCounter::count_mask_value);
2210   andr(backedge_count, tmp, backedge_count); // Cannot use andi, need sign extension of count_mask_value.
2211 
2212   // Load the invocation counter.
2213   lwz(invocation_count, inv_counter_offset, Rcounters); // is unsigned int
2214   // Add the delta to the invocation counter and store the result.
2215   addi(invocation_count, invocation_count, delta);
2216   // Store value.
2217   stw(invocation_count, inv_counter_offset, Rcounters);
2218 
2219   // Add invocation counter + backedge counter.
2220   add(iv_be_count, backedge_count, invocation_count);
2221 
2222   // Note that this macro must leave the backedge_count + invocation_count in
2223   // register iv_be_count!
2224   BLOCK_COMMENT("} Increment profiling counters");
2225 }
2226 
2227 void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
2228   if (state == atos) { MacroAssembler::verify_oop(reg); }
2229 }
2230 
2231 #ifndef CC_INTERP
2232 // Local helper function for the verify_oop_or_return_address macro.
2233 static bool verify_return_address(Method* m, int bci) {
2234 #ifndef PRODUCT
2235   address pc = (address)(m->constMethod()) + in_bytes(ConstMethod::codes_offset()) + bci;
2236   // Assume it is a valid return address if it is inside m and is preceded by a jsr.
2237   if (!m->contains(pc))                                            return false;
2238   address jsr_pc;
2239   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2240   if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())    return true;
2241   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2242   if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())    return true;
2243 #endif // PRODUCT
2244   return false;
2245 }
2246 
2247 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2248   if (VerifyFPU) {
2249     unimplemented("verfiyFPU");
2250   }
2251 }
2252 
2253 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2254   if (!VerifyOops) return;
2255 
2256   // The VM documentation for the astore[_wide] bytecode allows
2257   // the TOS to be not only an oop but also a return address.
2258   Label test;
2259   Label skip;
2260   // See if it is an address (in the current method):
2261 
2262   const int log2_bytecode_size_limit = 16;
2263   srdi_(Rtmp, reg, log2_bytecode_size_limit);
2264   bne(CCR0, test);
2265 
2266   address fd = CAST_FROM_FN_PTR(address, verify_return_address);
2267   const int nbytes_save = 11*8; // volatile gprs except R0
2268   save_volatile_gprs(R1_SP, -nbytes_save); // except R0
2269   save_LR_CR(Rtmp); // Save in old frame.
2270   push_frame_reg_args(nbytes_save, Rtmp);
2271 
2272   load_const_optimized(Rtmp, fd, R0);
2273   mr_if_needed(R4_ARG2, reg);
2274   mr(R3_ARG1, R19_method);
2275   call_c(Rtmp); // call C
2276 
2277   pop_frame();
2278   restore_LR_CR(Rtmp);
2279   restore_volatile_gprs(R1_SP, -nbytes_save); // except R0
2280   b(skip);
2281 
2282   // Perform a more elaborate out-of-line call.
2283   // Not an address; verify it:
2284   bind(test);
2285   verify_oop(reg);
2286   bind(skip);
2287 }
2288 #endif // !CC_INTERP
2289 
2290 // Inline assembly for:
2291 //
2292 // if (thread is in interp_only_mode) {
2293 //   InterpreterRuntime::post_method_entry();
2294 // }
2295 // if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY ) ||
2296 //     *jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_ENTRY2)   ) {
2297 //   SharedRuntime::jvmpi_method_entry(method, receiver);
2298 // }
2299 void InterpreterMacroAssembler::notify_method_entry() {
2300   // JVMTI
2301   // Whenever JVMTI puts a thread in interp_only_mode, method
2302   // entry/exit events are sent for that thread to track stack
2303   // depth. If it is possible to enter interp_only_mode we add
2304   // the code to check if the event should be sent.
2305   if (JvmtiExport::can_post_interpreter_events()) {
2306     Label jvmti_post_done;
2307 
2308     lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
2309     cmpwi(CCR0, R0, 0);
2310     beq(CCR0, jvmti_post_done);
2311     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry),
2312             /*check_exceptions=*/true CC_INTERP_ONLY(&& false));
2313 
2314     bind(jvmti_post_done);
2315   }
2316 }
2317 
2318 // Inline assembly for:
2319 //
2320 // if (thread is in interp_only_mode) {
2321 //   // save result
2322 //   InterpreterRuntime::post_method_exit();
2323 //   // restore result
2324 // }
2325 // if (*jvmpi::event_flags_array_at_addr(JVMPI_EVENT_METHOD_EXIT)) {
2326 //   // save result
2327 //   SharedRuntime::jvmpi_method_exit();
2328 //   // restore result
2329 // }
2330 //
2331 // Native methods have their result stored in d_tmp and l_tmp.
2332 // Java methods have their result stored in the expression stack.
2333 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method, TosState state,
2334                                                    NotifyMethodExitMode mode, bool check_exceptions) {
2335   // JVMTI
2336   // Whenever JVMTI puts a thread in interp_only_mode, method
2337   // entry/exit events are sent for that thread to track stack
2338   // depth. If it is possible to enter interp_only_mode we add
2339   // the code to check if the event should be sent.
2340   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2341     Label jvmti_post_done;
2342 
2343     lwz(R0, in_bytes(JavaThread::interp_only_mode_offset()), R16_thread);
2344     cmpwi(CCR0, R0, 0);
2345     beq(CCR0, jvmti_post_done);
2346     CC_INTERP_ONLY(assert(is_native_method && !check_exceptions, "must not push state"));
2347     if (!is_native_method) push(state); // Expose tos to GC.
2348     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit),
2349             /*check_exceptions=*/check_exceptions);
2350     if (!is_native_method) pop(state);
2351 
2352     align(32, 12);
2353     bind(jvmti_post_done);
2354   }
2355 
2356   // Dtrace support not implemented.
2357 }
2358 
2359 #ifdef CC_INTERP
2360 // Convert the current TOP_IJAVA_FRAME into a PARENT_IJAVA_FRAME
2361 // (using parent_frame_resize) and push a new interpreter
2362 // TOP_IJAVA_FRAME (using frame_size).
2363 void InterpreterMacroAssembler::push_interpreter_frame(Register top_frame_size, Register parent_frame_resize,
2364                                                        Register tmp1, Register tmp2, Register tmp3,
2365                                                        Register tmp4, Register pc) {
2366   assert_different_registers(top_frame_size, parent_frame_resize, tmp1, tmp2, tmp3, tmp4);
2367   ld(tmp1, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
2368   mr(tmp2/*top_frame_sp*/, R1_SP);
2369   // Move initial_caller_sp.
2370   ld(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
2371   neg(parent_frame_resize, parent_frame_resize);
2372   resize_frame(parent_frame_resize/*-parent_frame_resize*/, tmp3);
2373 
2374   // Set LR in new parent frame.
2375   std(tmp1, _abi(lr), R1_SP);
2376   // Set top_frame_sp info for new parent frame.
2377   std(tmp2, _parent_ijava_frame_abi(top_frame_sp), R1_SP);
2378   std(tmp4, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
2379 
2380   // Push new TOP_IJAVA_FRAME.
2381   push_frame(top_frame_size, tmp2);
2382 
2383   get_PC_trash_LR(tmp3);
2384   std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
2385   // Used for non-initial callers by unextended_sp().
2386   std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
2387 }
2388 
2389 // Pop the topmost TOP_IJAVA_FRAME and convert the previous
2390 // PARENT_IJAVA_FRAME back into a TOP_IJAVA_FRAME.
2391 void InterpreterMacroAssembler::pop_interpreter_frame(Register tmp1, Register tmp2, Register tmp3, Register tmp4) {
2392   assert_different_registers(tmp1, tmp2, tmp3, tmp4);
2393 
2394   ld(tmp1/*caller's sp*/, _abi(callers_sp), R1_SP);
2395   ld(tmp3, _abi(lr), tmp1);
2396 
2397   ld(tmp4, _parent_ijava_frame_abi(initial_caller_sp), tmp1);
2398 
2399   ld(tmp2/*caller's caller's sp*/, _abi(callers_sp), tmp1);
2400   // Merge top frame.
2401   std(tmp2, _abi(callers_sp), R1_SP);
2402 
2403   ld(tmp2, _parent_ijava_frame_abi(top_frame_sp), tmp1);
2404 
2405   // Update C stack pointer to caller's top_abi.
2406   resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/);
2407 
2408   // Update LR in top_frame.
2409   std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
2410 
2411   std(tmp4, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
2412 
2413   // Store the top-frame stack-pointer for c2i adapters.
2414   std(R1_SP, _top_ijava_frame_abi(top_frame_sp), R1_SP);
2415 }
2416 
2417 // Turn state's interpreter frame into the current TOP_IJAVA_FRAME.
2418 void InterpreterMacroAssembler::pop_interpreter_frame_to_state(Register state, Register tmp1, Register tmp2, Register tmp3) {
2419   assert_different_registers(R14_state, R15_prev_state, tmp1, tmp2, tmp3);
2420 
2421   if (state == R14_state) {
2422     ld(tmp1/*state's fp*/, state_(_last_Java_fp));
2423     ld(tmp2/*state's sp*/, state_(_last_Java_sp));
2424   } else if (state == R15_prev_state) {
2425     ld(tmp1/*state's fp*/, prev_state_(_last_Java_fp));
2426     ld(tmp2/*state's sp*/, prev_state_(_last_Java_sp));
2427   } else {
2428     ShouldNotReachHere();
2429   }
2430 
2431   // Merge top frames.
2432   std(tmp1, _abi(callers_sp), R1_SP);
2433 
2434   // Tmp2 is new SP.
2435   // Tmp1 is parent's SP.
2436   resize_frame_absolute(tmp2/*addr*/, tmp1/*tmp*/, tmp2/*tmp*/);
2437 
2438   // Update LR in top_frame.
2439   // Must be interpreter frame.
2440   get_PC_trash_LR(tmp3);
2441   std(tmp3, _top_ijava_frame_abi(frame_manager_lr), R1_SP);
2442   // Used for non-initial callers by unextended_sp().
2443   std(R1_SP, _top_ijava_frame_abi(initial_caller_sp), R1_SP);
2444 }
2445 
2446 // Set SP to initial caller's sp, but before fix the back chain.
2447 void InterpreterMacroAssembler::resize_frame_to_initial_caller(Register tmp1, Register tmp2) {
2448   ld(tmp1, _parent_ijava_frame_abi(initial_caller_sp), R1_SP);
2449   ld(tmp2, _parent_ijava_frame_abi(callers_sp), R1_SP);
2450   std(tmp2, _parent_ijava_frame_abi(callers_sp), tmp1); // Fix back chain ...
2451   mr(R1_SP, tmp1); // ... and resize to initial caller.
2452 }
2453 
2454 // Pop the current interpreter state (without popping the correspoding
2455 // frame) and restore R14_state and R15_prev_state accordingly.
2456 // Use prev_state_may_be_0 to indicate whether prev_state may be 0
2457 // in order to generate an extra check before retrieving prev_state_(_prev_link).
2458 void InterpreterMacroAssembler::pop_interpreter_state(bool prev_state_may_be_0)
2459 {
2460   // Move prev_state to state and restore prev_state from state_(_prev_link).
2461   Label prev_state_is_0;
2462   mr(R14_state, R15_prev_state);
2463 
2464   // Don't retrieve /*state==*/prev_state_(_prev_link)
2465   // if /*state==*/prev_state is 0.
2466   if (prev_state_may_be_0) {
2467     cmpdi(CCR0, R15_prev_state, 0);
2468     beq(CCR0, prev_state_is_0);
2469   }
2470 
2471   ld(R15_prev_state, /*state==*/prev_state_(_prev_link));
2472   bind(prev_state_is_0);
2473 }
2474 
2475 void InterpreterMacroAssembler::restore_prev_state() {
2476   // _prev_link is private, but cInterpreter is a friend.
2477   ld(R15_prev_state, state_(_prev_link));
2478 }
2479 #endif // CC_INTERP