1 /*
   2  * Copyright 1997-2010 Sun Microsystems, Inc.  All Rights Reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
  20  * CA 95054 USA or visit www.sun.com if you need additional information or
  21  * have any questions.
  22  *
  23  */
  24 
  25 #include "incls/_precompiled.incl"
  26 #include "incls/_interp_masm_sparc.cpp.incl"
  27 
  28 #ifndef CC_INTERP
  29 #ifndef FAST_DISPATCH
  30 #define FAST_DISPATCH 1
  31 #endif
  32 #undef FAST_DISPATCH
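// Note: FAST_DISPATCH is defined above and then immediately #undef'd, so the
// #ifdef FAST_DISPATCH paths below (dispatch through IdispatchTables) are
// currently compiled out and the AddressLiteral-based dispatch is used instead.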
  33 
  34 // Implementation of InterpreterMacroAssembler
  35 
  36 // This file specializes the assembler with interpreter-specific macros
  37 
  38 const Address InterpreterMacroAssembler::l_tmp(FP, (frame::interpreter_frame_l_scratch_fp_offset * wordSize) + STACK_BIAS);
  39 const Address InterpreterMacroAssembler::d_tmp(FP, (frame::interpreter_frame_d_scratch_fp_offset * wordSize) + STACK_BIAS);
  40 
  41 #else // CC_INTERP
  42 #ifndef STATE
  43 #define STATE(field_name) Lstate, in_bytes(byte_offset_of(BytecodeInterpreter, field_name))
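// STATE(field) expands to the (register, byte offset) pair that addresses the
// given BytecodeInterpreter field relative to Lstate in the C++ interpreter.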
  44 #endif // STATE
  45 
  46 #endif // CC_INTERP
  47 
  48 void InterpreterMacroAssembler::compute_extra_locals_size_in_bytes(Register args_size, Register locals_size, Register delta) {
  49   // Note: this algorithm is also used by C1's OSR entry sequence.
  50   // Any changes should also be applied to CodeEmitter::emit_osr_entry().
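  // In effect, the code below computes (a sketch, in words unless noted):
  //   delta = locals_size - args_size;             // locals doubled first if tagged
  //   if (delta < 0) delta = 0;                    // all locals are parameters
  //   delta = round_to(delta, WordsPerLong) << LogBytesPerWord;  // bytes, 2-word aligned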
  51   assert_different_registers(args_size, locals_size);
  52   // max_locals*2 for TAGS.  Assumes that args_size has already been adjusted.
  53   if (TaggedStackInterpreter) sll(locals_size, 1, locals_size);
  54   subcc(locals_size, args_size, delta);// extra space for non-arguments locals in words
  55   // Use br/mov combination because it works on both V8 and V9 and is
  56   // faster.
  57   Label skip_move;
  58   br(Assembler::negative, true, Assembler::pt, skip_move);
  59   delayed()->mov(G0, delta);
  60   bind(skip_move);
  61   round_to(delta, WordsPerLong);       // make multiple of 2 (SP must be 2-word aligned)
  62   sll(delta, LogBytesPerWord, delta);  // extra space for locals in bytes
  63 }
  64 
  65 #ifndef CC_INTERP
  66 
  67 // Dispatch code executed in the prolog of a bytecode which does not do its
  68 // own dispatch. The dispatch address is computed and placed in IdispatchAddress.
  69 void InterpreterMacroAssembler::dispatch_prolog(TosState state, int bcp_incr) {
  70   assert_not_delayed();
  71 #ifdef FAST_DISPATCH
  72   // FAST_DISPATCH and ProfileInterpreter are mutually exclusive since
  73   // they both use I2.
  74   assert(!ProfileInterpreter, "FAST_DISPATCH and +ProfileInterpreter are mutually exclusive");
  75   ldub(Lbcp, bcp_incr, Lbyte_code);                     // load next bytecode
  76   add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
  77                                                         // add offset to correct dispatch table
  78   sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  79   ld_ptr(IdispatchTables, Lbyte_code, IdispatchAddress);// get entry addr
  80 #else
  81   ldub( Lbcp, bcp_incr, Lbyte_code);                    // load next bytecode
  82   // dispatch table to use
  83   AddressLiteral tbl(Interpreter::dispatch_table(state));
  84   sll(Lbyte_code, LogBytesPerWord, Lbyte_code);         // multiply by wordSize
  85   set(tbl, G3_scratch);                                 // compute addr of table
  86   ld_ptr(G3_scratch, Lbyte_code, IdispatchAddress);     // get entry addr
  87 #endif
  88 }
  89 
  90 
  91 // Dispatch code executed in the epilog of a bytecode which does not do its
  92 // own dispatch. The dispatch address in IdispatchAddress is used for the
  93 // dispatch.
  94 void InterpreterMacroAssembler::dispatch_epilog(TosState state, int bcp_incr) {
  95   assert_not_delayed();
  96   verify_FPU(1, state);
  97   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
  98   jmp( IdispatchAddress, 0 );
  99   if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
 100   else                delayed()->nop();
 101 }
 102 
 103 
 104 void InterpreterMacroAssembler::dispatch_next(TosState state, int bcp_incr) {
 105   // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
 106   assert_not_delayed();
 107   ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
 108   dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr);
 109 }
 110 
 111 
 112 void InterpreterMacroAssembler::dispatch_next_noverify_oop(TosState state, int bcp_incr) {
 113   // %%%% consider branching to a single shared dispatch stub (for each bcp_incr)
 114   assert_not_delayed();
 115   ldub( Lbcp, bcp_incr, Lbyte_code);               // load next bytecode
 116   dispatch_Lbyte_code(state, Interpreter::dispatch_table(state), bcp_incr, false);
 117 }
 118 
 119 
 120 void InterpreterMacroAssembler::dispatch_via(TosState state, address* table) {
 121   // load current bytecode
 122   assert_not_delayed();
 123   ldub( Lbcp, 0, Lbyte_code);               // load current bytecode
 124   dispatch_base(state, table);
 125 }
 126 
 127 
 128 void InterpreterMacroAssembler::call_VM_leaf_base(
 129   Register java_thread,
 130   address  entry_point,
 131   int      number_of_arguments
 132 ) {
 133   if (!java_thread->is_valid())
 134     java_thread = L7_thread_cache;
 135   // super call
 136   MacroAssembler::call_VM_leaf_base(java_thread, entry_point, number_of_arguments);
 137 }
 138 
 139 
 140 void InterpreterMacroAssembler::call_VM_base(
 141   Register        oop_result,
 142   Register        java_thread,
 143   Register        last_java_sp,
 144   address         entry_point,
 145   int             number_of_arguments,
 146   bool            check_exception
 147 ) {
 148   if (!java_thread->is_valid())
 149     java_thread = L7_thread_cache;
 150   // See class ThreadInVMfromInterpreter, which assumes that the interpreter
 151   // takes responsibility for setting its own thread-state on call-out.
 152   // However, ThreadInVMfromInterpreter resets the state to "in_Java".
 153 
 154   //save_bcp();                                  // save bcp
 155   MacroAssembler::call_VM_base(oop_result, java_thread, last_java_sp, entry_point, number_of_arguments, check_exception);
 156   //restore_bcp();                               // restore bcp
 157   //restore_locals();                            // restore locals pointer
 158 }
 159 
 160 
 161 void InterpreterMacroAssembler::check_and_handle_popframe(Register scratch_reg) {
 162   if (JvmtiExport::can_pop_frame()) {
 163     Label L;
 164 
 165     // Check the "pending popframe condition" flag in the current thread
 166     ld(G2_thread, JavaThread::popframe_condition_offset(), scratch_reg);
 167 
 168     // Initiate popframe handling only if it is not already being processed.  If the flag
 169     // has the popframe_processing bit set, it means that this code is called *during* popframe
 170     // handling - we don't want to reenter.
 171     btst(JavaThread::popframe_pending_bit, scratch_reg);
 172     br(zero, false, pt, L);
 173     delayed()->nop();
 174     btst(JavaThread::popframe_processing_bit, scratch_reg);
 175     br(notZero, false, pt, L);
 176     delayed()->nop();
 177 
 178     // Call Interpreter::remove_activation_preserving_args_entry() to get the
 179     // address of the same-named entrypoint in the generated interpreter code.
 180     call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_preserving_args_entry));
 181 
 182     // Jump to Interpreter::_remove_activation_preserving_args_entry
 183     jmpl(O0, G0, G0);
 184     delayed()->nop();
 185     bind(L);
 186   }
 187 }
 188 
 189 
 190 void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
 191   Register thr_state = G4_scratch;
 192   ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
 193   const Address tos_addr(thr_state, JvmtiThreadState::earlyret_tos_offset());
 194   const Address oop_addr(thr_state, JvmtiThreadState::earlyret_oop_offset());
 195   const Address val_addr(thr_state, JvmtiThreadState::earlyret_value_offset());
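  // Load the forced early-return value for the given tos state from the
  // JvmtiThreadState into the TOS register(s), then mark the saved tos state
  // illegal (ilgl) and clear the saved value.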
 196   switch (state) {
 197   case ltos: ld_long(val_addr, Otos_l);                   break;
 198   case atos: ld_ptr(oop_addr, Otos_l);
 199              st_ptr(G0, oop_addr);                        break;
 200   case btos:                                           // fall through
 201   case ctos:                                           // fall through
 202   case stos:                                           // fall through
 203   case itos: ld(val_addr, Otos_l1);                       break;
 204   case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
 205   case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
 206   case vtos: /* nothing to do */                          break;
 207   default  : ShouldNotReachHere();
 208   }
 209   // Clean up tos value in the jvmti thread state
 210   or3(G0, ilgl, G3_scratch);
 211   stw(G3_scratch, tos_addr);
 212   st_long(G0, val_addr);
 213   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 214 }
 215 
 216 
 217 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
 218   if (JvmtiExport::can_force_early_return()) {
 219     Label L;
 220     Register thr_state = G3_scratch;
 221     ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
 222     tst(thr_state);
 223     br(zero, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
 224     delayed()->nop();
 225 
 226     // Initiate earlyret handling only if it is not already being processed.
 227     // If the flag has the earlyret_processing bit set, it means that this code
 228     // is called *during* earlyret handling - we don't want to reenter.
 229     ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
 230     cmp(G4_scratch, JvmtiThreadState::earlyret_pending);
 231     br(Assembler::notEqual, false, pt, L);
 232     delayed()->nop();
 233 
 234     // Call Interpreter::remove_activation_early_entry() to get the address of the
 235     // same-named entrypoint in the generated interpreter code
 236     ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
 237     call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
 238 
 239     // Jump to Interpreter::_remove_activation_early_entry
 240     jmpl(O0, G0, G0);
 241     delayed()->nop();
 242     bind(L);
 243   }
 244 }
 245 
 246 
 247 void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1) {
 248   mov(arg_1, O0);
 249   MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 1);
 250 }
 251 #endif /* CC_INTERP */
 252 
 253 
 254 #ifndef CC_INTERP
 255 
 256 void InterpreterMacroAssembler::dispatch_base(TosState state, address* table) {
 257   assert_not_delayed();
 258   dispatch_Lbyte_code(state, table);
 259 }
 260 
 261 
 262 void InterpreterMacroAssembler::dispatch_normal(TosState state) {
 263   dispatch_base(state, Interpreter::normal_table(state));
 264 }
 265 
 266 
 267 void InterpreterMacroAssembler::dispatch_only(TosState state) {
 268   dispatch_base(state, Interpreter::dispatch_table(state));
 269 }
 270 
 271 
 272 // common code to dispatch and dispatch_only
 273 // dispatch value in Lbyte_code and increment Lbcp
 274 
 275 void InterpreterMacroAssembler::dispatch_Lbyte_code(TosState state, address* table, int bcp_incr, bool verify) {
 276   verify_FPU(1, state);
 277   // %%%%% maybe implement +VerifyActivationFrameSize here
 278   //verify_thread(); //too slow; we will just verify on method entry & exit
 279   if (verify) interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 280 #ifdef FAST_DISPATCH
 281   if (table == Interpreter::dispatch_table(state)) {
 282     // use IdispatchTables
 283     add(Lbyte_code, Interpreter::distance_from_dispatch_table(state), Lbyte_code);
 284                                                         // add offset to correct dispatch table
 285     sll(Lbyte_code, LogBytesPerWord, Lbyte_code);       // multiply by wordSize
 286     ld_ptr(IdispatchTables, Lbyte_code, G3_scratch);    // get entry addr
 287   } else {
 288 #endif
 289     // dispatch table to use
 290     AddressLiteral tbl(table);
 291     sll(Lbyte_code, LogBytesPerWord, Lbyte_code);       // multiply by wordSize
 292     set(tbl, G3_scratch);                               // compute addr of table
 293     ld_ptr(G3_scratch, Lbyte_code, G3_scratch);         // get entry addr
 294 #ifdef FAST_DISPATCH
 295   }
 296 #endif
 297   jmp( G3_scratch, 0 );
 298   if (bcp_incr != 0)  delayed()->inc(Lbcp, bcp_incr);
 299   else                delayed()->nop();
 300 }
 301 
 302 
 303 // Helpers for expression stack
 304 
 305 // Longs and doubles are Category 2 computational types in the
 306 // JVM specification (section 3.11.1) and take 2 expression stack or
 307 // local slots.
 308 // Aligning them on 32-bit platforms with tagged stacks is hard because the code generated
 309 // for the dup* bytecodes depends on what types are already on the stack.
 310 // If the types are split into the two stack/local slots, that is much easier
 311 // (and we can use 0 for non-reference tags).
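// The load/store_unaligned_* helpers below therefore move category 2 values as
// two 32-bit halves (one per slot) on 32-bit builds and as a single 64-bit
// access on _LP64, and the category 2 push/pop helpers move Lesp by two
// stack elements at a time.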
 312 
 313 // Known good alignment in _LP64 but unknown otherwise
 314 void InterpreterMacroAssembler::load_unaligned_double(Register r1, int offset, FloatRegister d) {
 315   assert_not_delayed();
 316 
 317 #ifdef _LP64
 318   ldf(FloatRegisterImpl::D, r1, offset, d);
 319 #else
 320   ldf(FloatRegisterImpl::S, r1, offset, d);
 321   ldf(FloatRegisterImpl::S, r1, offset + Interpreter::stackElementSize(), d->successor());
 322 #endif
 323 }
 324 
 325 // Known good alignment in _LP64 but unknown otherwise
 326 void InterpreterMacroAssembler::store_unaligned_double(FloatRegister d, Register r1, int offset) {
 327   assert_not_delayed();
 328 
 329 #ifdef _LP64
 330   stf(FloatRegisterImpl::D, d, r1, offset);
 331   // store something more useful here
 332   debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
 333 #else
 334   stf(FloatRegisterImpl::S, d, r1, offset);
 335   stf(FloatRegisterImpl::S, d->successor(), r1, offset + Interpreter::stackElementSize());
 336 #endif
 337 }
 338 
 339 
 340 // Known good alignment in _LP64 but unknown otherwise
 341 void InterpreterMacroAssembler::load_unaligned_long(Register r1, int offset, Register rd) {
 342   assert_not_delayed();
 343 #ifdef _LP64
 344   ldx(r1, offset, rd);
 345 #else
 346   ld(r1, offset, rd);
 347   ld(r1, offset + Interpreter::stackElementSize(), rd->successor());
 348 #endif
 349 }
 350 
 351 // Known good alignment in _LP64 but unknown otherwise
 352 void InterpreterMacroAssembler::store_unaligned_long(Register l, Register r1, int offset) {
 353   assert_not_delayed();
 354 
 355 #ifdef _LP64
 356   stx(l, r1, offset);
 357   // store something more useful here
 358   debug_only(stx(G0, r1, offset+Interpreter::stackElementSize());)
 359 #else
 360   st(l, r1, offset);
 361   st(l->successor(), r1, offset + Interpreter::stackElementSize());
 362 #endif
 363 }
 364 
 365 #ifdef ASSERT
 366 void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t,
 367                                                  Register r,
 368                                                  Register scratch) {
 369   if (TaggedStackInterpreter) {
 370     Label ok, long_ok;
 371     ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(0), r);
 372     if (t == frame::TagCategory2) {
 373       cmp(r, G0);
 374       brx(Assembler::equal, false, Assembler::pt, long_ok);
 375       delayed()->ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(1), r);
 376       stop("stack long/double tag value bad");
 377       bind(long_ok);
 378       cmp(r, G0);
 379     } else if (t == frame::TagValue) {
 380       cmp(r, G0);
 381     } else {
 382       assert_different_registers(r, scratch);
 383       mov(t, scratch);
 384       cmp(r, scratch);
 385     }
 386     brx(Assembler::equal, false, Assembler::pt, ok);
 387     delayed()->nop();
 388     // Also accept a zero stack value; in that case the tag might
 389     // not have been set coming from deopt.
 390     ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
 391     cmp(r, G0);
 392     brx(Assembler::equal, false, Assembler::pt, ok);
 393     delayed()->nop();
 394     stop("Stack tag value is bad");
 395     bind(ok);
 396   }
 397 }
 398 #endif // ASSERT
 399 
 400 void InterpreterMacroAssembler::pop_i(Register r) {
 401   assert_not_delayed();
 402   // Uses destination register r for scratch
 403   debug_only(verify_stack_tag(frame::TagValue, r));
 404   ld(Lesp, Interpreter::expr_offset_in_bytes(0), r);
 405   inc(Lesp, Interpreter::stackElementSize());
 406   debug_only(verify_esp(Lesp));
 407 }
 408 
 409 void InterpreterMacroAssembler::pop_ptr(Register r, Register scratch) {
 410   assert_not_delayed();
 411   // Uses destination register r for scratch
 412   debug_only(verify_stack_tag(frame::TagReference, r, scratch));
 413   ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(0), r);
 414   inc(Lesp, Interpreter::stackElementSize());
 415   debug_only(verify_esp(Lesp));
 416 }
 417 
 418 void InterpreterMacroAssembler::pop_l(Register r) {
 419   assert_not_delayed();
 420   // Uses destination register r for scratch
 421   debug_only(verify_stack_tag(frame::TagCategory2, r));
 422   load_unaligned_long(Lesp, Interpreter::expr_offset_in_bytes(0), r);
 423   inc(Lesp, 2*Interpreter::stackElementSize());
 424   debug_only(verify_esp(Lesp));
 425 }
 426 
 427 
 428 void InterpreterMacroAssembler::pop_f(FloatRegister f, Register scratch) {
 429   assert_not_delayed();
 430   debug_only(verify_stack_tag(frame::TagValue, scratch));
 431   ldf(FloatRegisterImpl::S, Lesp, Interpreter::expr_offset_in_bytes(0), f);
 432   inc(Lesp, Interpreter::stackElementSize());
 433   debug_only(verify_esp(Lesp));
 434 }
 435 
 436 
 437 void InterpreterMacroAssembler::pop_d(FloatRegister f, Register scratch) {
 438   assert_not_delayed();
 439   debug_only(verify_stack_tag(frame::TagCategory2, scratch));
 440   load_unaligned_double(Lesp, Interpreter::expr_offset_in_bytes(0), f);
 441   inc(Lesp, 2*Interpreter::stackElementSize());
 442   debug_only(verify_esp(Lesp));
 443 }
 444 
 445 
 446 // (Note: use the register first, then decrement, so the dec can be done during the store stall)
 447 void InterpreterMacroAssembler::tag_stack(Register r) {
 448   if (TaggedStackInterpreter) {
 449     st_ptr(r, Lesp, Interpreter::tag_offset_in_bytes());
 450   }
 451 }
 452 
 453 void InterpreterMacroAssembler::tag_stack(frame::Tag t, Register r) {
 454   if (TaggedStackInterpreter) {
 455     assert (frame::TagValue == 0, "TagValue must be zero");
 456     if (t == frame::TagValue) {
 457       st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
 458     } else if (t == frame::TagCategory2) {
 459       st_ptr(G0, Lesp, Interpreter::tag_offset_in_bytes());
 460       // Tag next slot down too
 461       st_ptr(G0, Lesp, -Interpreter::stackElementSize() + Interpreter::tag_offset_in_bytes());
 462     } else {
 463       assert_different_registers(r, O3);
 464       mov(t, O3);
 465       st_ptr(O3, Lesp, Interpreter::tag_offset_in_bytes());
 466     }
 467   }
 468 }
 469 
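// Convention for the push/pop helpers below: a push tags and stores the value
// relative to Lesp and then decrements Lesp by one stack element (two for
// category 2 values); a pop loads relative to Lesp and then increments it, so
// the expression stack grows toward lower addresses.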
 470 void InterpreterMacroAssembler::push_i(Register r) {
 471   assert_not_delayed();
 472   debug_only(verify_esp(Lesp));
 473   tag_stack(frame::TagValue, r);
 474   st(  r,    Lesp, Interpreter::value_offset_in_bytes());
 475   dec( Lesp, Interpreter::stackElementSize());
 476 }
 477 
 478 void InterpreterMacroAssembler::push_ptr(Register r) {
 479   assert_not_delayed();
 480   tag_stack(frame::TagReference, r);
 481   st_ptr(  r,    Lesp, Interpreter::value_offset_in_bytes());
 482   dec( Lesp, Interpreter::stackElementSize());
 483 }
 484 
 485 void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
 486   assert_not_delayed();
 487   tag_stack(tag);
 488   st_ptr(r, Lesp, Interpreter::value_offset_in_bytes());
 489   dec( Lesp, Interpreter::stackElementSize());
 490 }
 491 
 492 // remember: our convention for longs in SPARC is:
 493 // O0 (Otos_l1) has high-order part in first word,
 494 // O1 (Otos_l2) has low-order part in second word
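// store_unaligned_long/push_l preserve that order in memory on 32-bit builds:
// the high-order word is stored at the lower address and the low-order word
// one stack element higher.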
 495 
 496 void InterpreterMacroAssembler::push_l(Register r) {
 497   assert_not_delayed();
 498   debug_only(verify_esp(Lesp));
 499   tag_stack(frame::TagCategory2, r);
 500   // Longs are stored in memory-correct order, even if unaligned,
 501   // and may be separated by stack tags.
 502   int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
 503   store_unaligned_long(r, Lesp, offset);
 504   dec(Lesp, 2 * Interpreter::stackElementSize());
 505 }
 506 
 507 
 508 void InterpreterMacroAssembler::push_f(FloatRegister f) {
 509   assert_not_delayed();
 510   debug_only(verify_esp(Lesp));
 511   tag_stack(frame::TagValue, Otos_i);
 512   stf(FloatRegisterImpl::S, f, Lesp, Interpreter::value_offset_in_bytes());
 513   dec(Lesp, Interpreter::stackElementSize());
 514 }
 515 
 516 
 517 void InterpreterMacroAssembler::push_d(FloatRegister d)   {
 518   assert_not_delayed();
 519   debug_only(verify_esp(Lesp));
 520   tag_stack(frame::TagCategory2, Otos_i);
 521   // Longs are stored in memory-correct order, even if unaligned,
 522   // and may be separated by stack tags.
 523   int offset = -Interpreter::stackElementSize() + Interpreter::value_offset_in_bytes();
 524   store_unaligned_double(d, Lesp, offset);
 525   dec(Lesp, 2 * Interpreter::stackElementSize());
 526 }
 527 
 528 
 529 void InterpreterMacroAssembler::push(TosState state) {
 530   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 531   switch (state) {
 532     case atos: push_ptr();            break;
 533     case btos: push_i();              break;
 534     case ctos:
 535     case stos: push_i();              break;
 536     case itos: push_i();              break;
 537     case ltos: push_l();              break;
 538     case ftos: push_f();              break;
 539     case dtos: push_d();              break;
 540     case vtos: /* nothing to do */    break;
 541     default  : ShouldNotReachHere();
 542   }
 543 }
 544 
 545 
 546 void InterpreterMacroAssembler::pop(TosState state) {
 547   switch (state) {
 548     case atos: pop_ptr();            break;
 549     case btos: pop_i();              break;
 550     case ctos:
 551     case stos: pop_i();              break;
 552     case itos: pop_i();              break;
 553     case ltos: pop_l();              break;
 554     case ftos: pop_f();              break;
 555     case dtos: pop_d();              break;
 556     case vtos: /* nothing to do */   break;
 557     default  : ShouldNotReachHere();
 558   }
 559   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 560 }
 561 
 562 
 563 // Tagged stack helpers for swap and dup
 564 void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
 565                                                  Register tag) {
 566   ld_ptr(Lesp, Interpreter::expr_offset_in_bytes(n), val);
 567   if (TaggedStackInterpreter) {
 568     ld_ptr(Lesp, Interpreter::expr_tag_offset_in_bytes(n), tag);
 569   }
 570 }
 571 void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
 572                                                   Register tag) {
 573   st_ptr(val, Lesp, Interpreter::expr_offset_in_bytes(n));
 574   if (TaggedStackInterpreter) {
 575     st_ptr(tag, Lesp, Interpreter::expr_tag_offset_in_bytes(n));
 576   }
 577 }
 578 
 579 
 580 void InterpreterMacroAssembler::load_receiver(Register param_count,
 581                                               Register recv) {
 582 
 583   sll(param_count, Interpreter::logStackElementSize(), param_count);
 584   if (TaggedStackInterpreter) {
 585     add(param_count, Interpreter::value_offset_in_bytes(), param_count);  // get obj address
 586   }
 587   ld_ptr(Lesp, param_count, recv);                      // gets receiver Oop
 588 }
 589 
 590 void InterpreterMacroAssembler::empty_expression_stack() {
 591   // Reset Lesp.
 592   sub( Lmonitors, wordSize, Lesp );
 593 
 594   // Reset SP by subtracting more space from Lesp.
 595   Label done;
 596   verify_oop(Lmethod);
 597   assert(G4_scratch != Gframe_size, "Only you can prevent register aliasing!");
 598 
 599   // A native method does not need to do this, since its callee does not change SP.
 600   ld(Lmethod, methodOopDesc::access_flags_offset(), Gframe_size);  // Load access flags.
 601   btst(JVM_ACC_NATIVE, Gframe_size);
 602   br(Assembler::notZero, false, Assembler::pt, done);
 603   delayed()->nop();
 604 
 605   // Compute max expression stack+register save area
 606   lduh(Lmethod, in_bytes(methodOopDesc::max_stack_offset()), Gframe_size);  // Load max stack.
 607   if (TaggedStackInterpreter) sll ( Gframe_size, 1, Gframe_size);  // max_stack * 2 for TAGS
 608   add( Gframe_size, frame::memory_parameter_word_sp_offset, Gframe_size );
 609 
 610   //
 611   // now set up a stack frame with the size computed above
 612   //
 613   //round_to( Gframe_size, WordsPerLong ); // -- moved down to the "and" below
 614   sll( Gframe_size, LogBytesPerWord, Gframe_size );
 615   sub( Lesp, Gframe_size, Gframe_size );
 616   and3( Gframe_size, -(2 * wordSize), Gframe_size );          // align SP (downwards) to an 8/16-byte boundary
 617   debug_only(verify_sp(Gframe_size, G4_scratch));
 618 #ifdef _LP64
 619   sub(Gframe_size, STACK_BIAS, Gframe_size );
 620 #endif
 621   mov(Gframe_size, SP);
 622 
 623   bind(done);
 624 }
 625 
 626 
 627 #ifdef ASSERT
 628 void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
 629   Label Bad, OK;
 630 
 631   // Saved SP must be aligned.
 632 #ifdef _LP64
 633   btst(2*BytesPerWord-1, Rsp);
 634 #else
 635   btst(LongAlignmentMask, Rsp);
 636 #endif
 637   br(Assembler::notZero, false, Assembler::pn, Bad);
 638   delayed()->nop();
 639 
 640   // Saved SP, plus register window size, must not be above FP.
 641   add(Rsp, frame::register_save_words * wordSize, Rtemp);
 642 #ifdef _LP64
 643   sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
 644 #endif
 645   cmp(Rtemp, FP);
 646   brx(Assembler::greaterUnsigned, false, Assembler::pn, Bad);
 647   delayed()->nop();
 648 
 649   // Saved SP must not be ridiculously below current SP.
 650   size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
 651   set(maxstack, Rtemp);
 652   sub(SP, Rtemp, Rtemp);
 653 #ifdef _LP64
 654   add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
 655 #endif
 656   cmp(Rsp, Rtemp);
 657   brx(Assembler::lessUnsigned, false, Assembler::pn, Bad);
 658   delayed()->nop();
 659 
 660   br(Assembler::always, false, Assembler::pn, OK);
 661   delayed()->nop();
 662 
 663   bind(Bad);
 664   stop("on return to interpreted call, restored SP is corrupted");
 665 
 666   bind(OK);
 667 }
 668 
 669 
 670 void InterpreterMacroAssembler::verify_esp(Register Resp) {
 671   // about to read or write Resp[0]
 672   // make sure it is not in the monitors or the register save area
 673   Label OK1, OK2;
 674 
 675   cmp(Resp, Lmonitors);
 676   brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
 677   delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
 678   stop("too many pops:  Lesp points into monitor area");
 679   bind(OK1);
 680 #ifdef _LP64
 681   sub(Resp, STACK_BIAS, Resp);
 682 #endif
 683   cmp(Resp, SP);
 684   brx(Assembler::greaterEqualUnsigned, false, Assembler::pt, OK2);
 685   delayed()->add(Resp, STACK_BIAS + frame::memory_parameter_word_sp_offset * wordSize, Resp);
 686   stop("too many pushes:  Lesp points into register window");
 687   bind(OK2);
 688 }
 689 #endif // ASSERT
 690 
 691 // Load compiled (i2c) or interpreter entry when calling from interpreted and
 692 // do the call. Centralized so that all interpreter calls will do the same actions.
 693 // If jvmti single stepping is on for a thread we must not call compiled code.
 694 void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {
 695 
 696   // Assume we want to go compiled if available
 697 
 698   ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
 699 
 700   if (JvmtiExport::can_post_interpreter_events()) {
 701     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 702     // compiled code in threads for which the event is enabled.  Check here for
 703     // interp_only_mode if these events CAN be enabled.
 704     verify_thread();
 705     Label skip_compiled_code;
 706 
 707     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
 708     ld(interp_only, scratch);
 709     tst(scratch);
 710     br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
 711     delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
 712     bind(skip_compiled_code);
 713   }
 714 
 715   // the i2c_adapters need methodOop in G5_method (right? %%%)
 716   // do the call
 717 #ifdef ASSERT
 718   {
 719     Label ok;
 720     br_notnull(target, false, Assembler::pt, ok);
 721     delayed()->nop();
 722     stop("null entry point");
 723     bind(ok);
 724   }
 725 #endif // ASSERT
 726 
 727   // Adjust Rret first so Llast_SP can be same as Rret
 728   add(Rret, -frame::pc_return_offset, O7);
 729   add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
 730   // Record SP so we can remove any stack space allocated by adapter transition
 731   jmp(target, 0);
 732   delayed()->mov(SP, Llast_SP);
 733 }
 734 
 735 void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
 736   assert_not_delayed();
 737 
 738   Label not_taken;
 739   if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
 740   else             br (cc, false, Assembler::pn, not_taken);
 741   delayed()->nop();
 742 
 743   TemplateTable::branch(false,false);
 744 
 745   bind(not_taken);
 746 
 747   profile_not_taken_branch(G3_scratch);
 748 }
 749 
 750 
 751 void InterpreterMacroAssembler::get_2_byte_integer_at_bcp(
 752                                   int         bcp_offset,
 753                                   Register    Rtmp,
 754                                   Register    Rdst,
 755                                   signedOrNot is_signed,
 756                                   setCCOrNot  should_set_CC ) {
 757   assert(Rtmp != Rdst, "need separate temp register");
 758   assert_not_delayed();
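  // Assembles Rdst = (bcp[bcp_offset] << 8) | bcp[bcp_offset + 1].  The high
  // byte is sign- or zero-extended according to is_signed, and the final or
  // also sets the condition codes when should_set_CC == set_CC.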
 759   switch (is_signed) {
 760    default: ShouldNotReachHere();
 761 
 762    case   Signed:  ldsb( Lbcp, bcp_offset, Rdst  );  break; // high byte
 763    case Unsigned:  ldub( Lbcp, bcp_offset, Rdst  );  break; // high byte
 764   }
 765   ldub( Lbcp, bcp_offset + 1, Rtmp ); // low byte
 766   sll( Rdst, BitsPerByte, Rdst);
 767   switch (should_set_CC ) {
 768    default: ShouldNotReachHere();
 769 
 770    case      set_CC:  orcc( Rdst, Rtmp, Rdst ); break;
 771    case dont_set_CC:  or3(  Rdst, Rtmp, Rdst ); break;
 772   }
 773 }
 774 
 775 
 776 void InterpreterMacroAssembler::get_4_byte_integer_at_bcp(
 777                                   int        bcp_offset,
 778                                   Register   Rtmp,
 779                                   Register   Rdst,
 780                                   setCCOrNot should_set_CC ) {
 781   assert(Rtmp != Rdst, "need separate temp register");
 782   assert_not_delayed();
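  // The 4-byte operand in the bytecode stream may not be word aligned.  When
  // (Lbcp + bcp_offset) is aligned the load in the annulled delay slot below
  // does the whole job; otherwise the value is assembled big-endian from four
  // separate byte loads.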
 783   add( Lbcp, bcp_offset, Rtmp);
 784   andcc( Rtmp, 3, G0);
 785   Label aligned;
 786   switch (should_set_CC ) {
 787    default: ShouldNotReachHere();
 788 
 789    case      set_CC: break;
 790    case dont_set_CC: break;
 791   }
 792 
 793   br(Assembler::zero, true, Assembler::pn, aligned);
 794 #ifdef _LP64
 795   delayed()->ldsw(Rtmp, 0, Rdst);
 796 #else
 797   delayed()->ld(Rtmp, 0, Rdst);
 798 #endif
 799 
 800   ldub(Lbcp, bcp_offset + 3, Rdst);
 801   ldub(Lbcp, bcp_offset + 2, Rtmp);  sll(Rtmp,  8, Rtmp);  or3(Rtmp, Rdst, Rdst);
 802   ldub(Lbcp, bcp_offset + 1, Rtmp);  sll(Rtmp, 16, Rtmp);  or3(Rtmp, Rdst, Rdst);
 803 #ifdef _LP64
 804   ldsb(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
 805 #else
 806   // Unsigned load is faster than signed on some implementations
 807   ldub(Lbcp, bcp_offset + 0, Rtmp);  sll(Rtmp, 24, Rtmp);
 808 #endif
 809   or3(Rtmp, Rdst, Rdst );
 810 
 811   bind(aligned);
 812   if (should_set_CC == set_CC) tst(Rdst);
 813 }
 814 
 815 
 816 void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Register tmp, int bcp_offset) {
 817   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 818   assert_different_registers(cache, tmp);
 819   assert_not_delayed();
 820   get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
 821               // convert from field index to ConstantPoolCacheEntry index
 822               // and from word index to byte offset
 823   sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
 824   add(LcpoolCache, tmp, cache);
 825 }
 826 
 827 
 828 void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, Register tmp, int bcp_offset) {
 829   assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
 830   assert_different_registers(cache, tmp);
 831   assert_not_delayed();
 832   get_2_byte_integer_at_bcp(bcp_offset, cache, tmp, Unsigned);
 833               // convert from field index to ConstantPoolCacheEntry index
 834               // and from word index to byte offset
 835   sll(tmp, exact_log2(in_words(ConstantPoolCacheEntry::size()) * BytesPerWord), tmp);
 836               // skip past the header
 837   add(tmp, in_bytes(constantPoolCacheOopDesc::base_offset()), tmp);
 838               // construct pointer to cache entry
 839   add(LcpoolCache, tmp, cache);
 840 }
 841 
 842 
 843 // Generate a subtype check: branch to ok_is_subtype if sub_klass is
 844 // a subtype of super_klass.  Blows registers Rsuper_klass, Rsub_klass, tmp1, tmp2.
 845 void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
 846                                                   Register Rsuper_klass,
 847                                                   Register Rtmp1,
 848                                                   Register Rtmp2,
 849                                                   Register Rtmp3,
 850                                                   Label &ok_is_subtype ) {
 851   Label not_subtype;
 852 
 853   // Profile the not-null value's klass.
 854   profile_typecheck(Rsub_klass, Rtmp1);
 855 
 856   check_klass_subtype_fast_path(Rsub_klass, Rsuper_klass,
 857                                 Rtmp1, Rtmp2,
 858                                 &ok_is_subtype, &not_subtype, NULL);
 859 
 860   check_klass_subtype_slow_path(Rsub_klass, Rsuper_klass,
 861                                 Rtmp1, Rtmp2, Rtmp3, /*hack:*/ noreg,
 862                                 &ok_is_subtype, NULL);
 863 
 864   bind(not_subtype);
 865   profile_typecheck_failed(Rtmp1);
 866 }
 867 
 868 // Separate these two to allow for delay slot in middle
 869 // These are used to do a test and full jump to exception-throwing code.
 870 
 871 // %%%%% Could possibly reoptimize this by testing to see if we could use
 872 // a single conditional branch (i.e., if the span is small enough).
 873 // If you go that route, then get rid of the split and give up
 874 // on the delay-slot hack.
 875 
 876 void InterpreterMacroAssembler::throw_if_not_1_icc( Condition ok_condition,
 877                                                     Label&    ok ) {
 878   assert_not_delayed();
 879   br(ok_condition, true, pt, ok);
 880   // DELAY SLOT
 881 }
 882 
 883 void InterpreterMacroAssembler::throw_if_not_1_xcc( Condition ok_condition,
 884                                                     Label&    ok ) {
 885   assert_not_delayed();
 886   bp( ok_condition, true, Assembler::xcc, pt, ok);
 887   // DELAY SLOT
 888 }
 889 
 890 void InterpreterMacroAssembler::throw_if_not_1_x( Condition ok_condition,
 891                                                   Label&    ok ) {
 892   assert_not_delayed();
 893   brx(ok_condition, true, pt, ok);
 894   // DELAY SLOT
 895 }
 896 
 897 void InterpreterMacroAssembler::throw_if_not_2( address  throw_entry_point,
 898                                                 Register Rscratch,
 899                                                 Label&   ok ) {
 900   assert(throw_entry_point != NULL, "entry point must be generated by now");
 901   AddressLiteral dest(throw_entry_point);
 902   jump_to(dest, Rscratch);
 903   delayed()->nop();
 904   bind(ok);
 905 }
 906 
 907 
 908 // And if you cannot use the delay slot, here is a shorthand:
 909 
 910 void InterpreterMacroAssembler::throw_if_not_icc( Condition ok_condition,
 911                                                   address   throw_entry_point,
 912                                                   Register  Rscratch ) {
 913   Label ok;
 914   if (ok_condition != never) {
 915     throw_if_not_1_icc( ok_condition, ok);
 916     delayed()->nop();
 917   }
 918   throw_if_not_2( throw_entry_point, Rscratch, ok);
 919 }
 920 void InterpreterMacroAssembler::throw_if_not_xcc( Condition ok_condition,
 921                                                   address   throw_entry_point,
 922                                                   Register  Rscratch ) {
 923   Label ok;
 924   if (ok_condition != never) {
 925     throw_if_not_1_xcc( ok_condition, ok);
 926     delayed()->nop();
 927   }
 928   throw_if_not_2( throw_entry_point, Rscratch, ok);
 929 }
 930 void InterpreterMacroAssembler::throw_if_not_x( Condition ok_condition,
 931                                                 address   throw_entry_point,
 932                                                 Register  Rscratch ) {
 933   Label ok;
 934   if (ok_condition != never) {
 935     throw_if_not_1_x( ok_condition, ok);
 936     delayed()->nop();
 937   }
 938   throw_if_not_2( throw_entry_point, Rscratch, ok);
 939 }
 940 
 941 // Check that index is in range for array, then shift index by index_shift, and put arrayOop + shifted_index into res
 942 // Note: res is still shy of address by array offset into object.
 943 
 944 void InterpreterMacroAssembler::index_check_without_pop(Register array, Register index, int index_shift, Register tmp, Register res) {
 945   assert_not_delayed();
 946 
 947   verify_oop(array);
 948 #ifdef _LP64
 949   // sign extend since tos (index) can be a 32-bit value
 950   sra(index, G0, index);
 951 #endif // _LP64
 952 
 953   // check array
 954   Label ptr_ok;
 955   tst(array);
 956   throw_if_not_1_x( notZero, ptr_ok );
 957   delayed()->ld( array, arrayOopDesc::length_offset_in_bytes(), tmp ); // check index
 958   throw_if_not_2( Interpreter::_throw_NullPointerException_entry, G3_scratch, ptr_ok);
 959 
 960   Label index_ok;
 961   cmp(index, tmp);
 962   throw_if_not_1_icc( lessUnsigned, index_ok );
 963   if (index_shift > 0)  delayed()->sll(index, index_shift, index);
 964   else                  delayed()->add(array, index, res); // addr - const offset in index
 965   // convention: move aberrant index into G3_scratch for exception message
 966   mov(index, G3_scratch);
 967   throw_if_not_2( Interpreter::_throw_ArrayIndexOutOfBoundsException_entry, G4_scratch, index_ok);
 968 
 969   // add offset if didn't do it in delay slot
 970   if (index_shift > 0)   add(array, index, res); // addr - const offset in index
 971 }
 972 
 973 
 974 void InterpreterMacroAssembler::index_check(Register array, Register index, int index_shift, Register tmp, Register res) {
 975   assert_not_delayed();
 976 
 977   // pop array
 978   pop_ptr(array);
 979 
 980   // check array
 981   index_check_without_pop(array, index, index_shift, tmp, res);
 982 }
 983 
 984 
 985 void InterpreterMacroAssembler::get_constant_pool(Register Rdst) {
 986   ld_ptr(Lmethod, in_bytes(methodOopDesc::constants_offset()), Rdst);
 987 }
 988 
 989 
 990 void InterpreterMacroAssembler::get_constant_pool_cache(Register Rdst) {
 991   get_constant_pool(Rdst);
 992   ld_ptr(Rdst, constantPoolOopDesc::cache_offset_in_bytes(), Rdst);
 993 }
 994 
 995 
 996 void InterpreterMacroAssembler::get_cpool_and_tags(Register Rcpool, Register Rtags) {
 997   get_constant_pool(Rcpool);
 998   ld_ptr(Rcpool, constantPoolOopDesc::tags_offset_in_bytes(), Rtags);
 999 }
1000 
1001 
1002 // unlock if synchronized method
1003 //
1004 // Unlock the receiver if this is a synchronized method.
1005 // Unlock any Java monitors from synchronized blocks.
1006 //
1007 // If there are locked Java monitors
1008 //    If throw_monitor_exception
1009 //       throws IllegalMonitorStateException
1010 //    Else if install_monitor_exception
1011 //       installs IllegalMonitorStateException
1012 //    Else
1013 //       no error processing
1014 void InterpreterMacroAssembler::unlock_if_synchronized_method(TosState state,
1015                                                               bool throw_monitor_exception,
1016                                                               bool install_monitor_exception) {
1017   Label unlocked, unlock, no_unlock;
1018 
1019   // get the value of _do_not_unlock_if_synchronized into G1_scratch
1020   const Address do_not_unlock_if_synchronized(G2_thread,
1021     JavaThread::do_not_unlock_if_synchronized_offset());
1022   ldbool(do_not_unlock_if_synchronized, G1_scratch);
1023   stbool(G0, do_not_unlock_if_synchronized); // reset the flag
1024 
1025   // check if synchronized method
1026   const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
1027   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
1028   push(state); // save tos
1029   ld(access_flags, G3_scratch); // Load access flags.
1030   btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
1031   br(zero, false, pt, unlocked);
1032   delayed()->nop();
1033 
1034   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
1035   // is set.
1036   tstbool(G1_scratch);
1037   br(Assembler::notZero, false, pn, no_unlock);
1038   delayed()->nop();
1039 
1040   // BasicObjectLock will be first in list, since this is a synchronized method. However, we need
1041   // to check that the object has not been unlocked by an explicit monitorexit bytecode.
1042 
1043   //Intel: if (throw_monitor_exception) ... else ...
1044   // Entry already unlocked, need to throw exception
1045   //...
1046 
1047   // pass top-most monitor elem
1048   add( top_most_monitor(), O1 );
1049 
1050   ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
1051   br_notnull(G3_scratch, false, pt, unlock);
1052   delayed()->nop();
1053 
1054   if (throw_monitor_exception) {
1055     // Entry already unlocked; need to throw an exception
1056     MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1057     should_not_reach_here();
1058   } else {
1059     // Monitor already unlocked during a stack unroll.
1060     // If requested, install an illegal_monitor_state_exception.
1061     // Continue with stack unrolling.
1062     if (install_monitor_exception) {
1063       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1064     }
1065     ba(false, unlocked);
1066     delayed()->nop();
1067   }
1068 
1069   bind(unlock);
1070 
1071   unlock_object(O1);
1072 
1073   bind(unlocked);
1074 
1075   // I0, I1: Might contain return value
1076 
1077   // Check that all monitors are unlocked
1078   { Label loop, exception, entry, restart;
1079 
1080     Register Rmptr   = O0;
1081     Register Rtemp   = O1;
1082     Register Rlimit  = Lmonitors;
1083     const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1084     assert( (delta & LongAlignmentMask) == 0,
1085             "sizeof BasicObjectLock must be even number of doublewords");
1086 
1087     #ifdef ASSERT
1088     add(top_most_monitor(), Rmptr, delta);
1089     { Label L;
1090       // ensure that Rmptr starts out above (or at) Rlimit
1091       cmp(Rmptr, Rlimit);
1092       brx(Assembler::greaterEqualUnsigned, false, pn, L);
1093       delayed()->nop();
1094       stop("monitor stack has negative size");
1095       bind(L);
1096     }
1097     #endif
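    // Scan the monitor block from the bottom-most entry toward Lmonitors; any
    // BasicObjectLock whose obj field is still non-NULL is still locked, and
    // control transfers to the 'exception' case below.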
1098     bind(restart);
1099     ba(false, entry);
1100     delayed()->
1101     add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry
1102 
1103     // Entry is still locked, need to throw exception
1104     bind(exception);
1105     if (throw_monitor_exception) {
1106       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1107       should_not_reach_here();
1108     } else {
1109       // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
1110       // Unlock does not block, so don't have to worry about the frame
1111       unlock_object(Rmptr);
1112       if (install_monitor_exception) {
1113         MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1114       }
1115       ba(false, restart);
1116       delayed()->nop();
1117     }
1118 
1119     bind(loop);
1120     cmp(Rtemp, G0);                             // check if current entry is used
1121     brx(Assembler::notEqual, false, pn, exception);
1122     delayed()->
1123     dec(Rmptr, delta);                          // otherwise advance to next entry
1124     #ifdef ASSERT
1125     { Label L;
1126       // ensure that Rmptr has not somehow stepped below Rlimit
1127       cmp(Rmptr, Rlimit);
1128       brx(Assembler::greaterEqualUnsigned, false, pn, L);
1129       delayed()->nop();
1130       stop("ran off the end of the monitor stack");
1131       bind(L);
1132     }
1133     #endif
1134     bind(entry);
1135     cmp(Rmptr, Rlimit);                         // check if bottom reached
1136     brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
1137     delayed()->
1138     ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
1139   }
1140 
1141   bind(no_unlock);
1142   pop(state);
1143   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
1144 }
1145 
1146 
1147 // remove activation
1148 //
1149 // Unlock the receiver if this is a synchronized method.
1150 // Unlock any Java monitors from synchronized blocks.
1151 // Remove the activation from the stack.
1152 //
1153 // If there are locked Java monitors
1154 //    If throw_monitor_exception
1155 //       throws IllegalMonitorStateException
1156 //    Else if install_monitor_exception
1157 //       installs IllegalMonitorStateException
1158 //    Else
1159 //       no error processing
1160 void InterpreterMacroAssembler::remove_activation(TosState state,
1161                                                   bool throw_monitor_exception,
1162                                                   bool install_monitor_exception) {
1163 
1164   unlock_if_synchronized_method(state, throw_monitor_exception, install_monitor_exception);
1165 
1166   // save result (push state before jvmti call and pop it afterwards) and notify jvmti
1167   notify_method_exit(false, state, NotifyJVMTI);
1168 
1169   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
1170   verify_oop(Lmethod);
1171   verify_thread();
1172 
1173   // return tos
1174   assert(Otos_l1 == Otos_i, "adjust code below");
1175   switch (state) {
1176 #ifdef _LP64
1177   case ltos: mov(Otos_l, Otos_l->after_save()); break; // O0 -> I0
1178 #else
1179   case ltos: mov(Otos_l2, Otos_l2->after_save()); // fall through  // O1 -> I1
1180 #endif
1181   case btos:                                      // fall through
1182   case ctos:
1183   case stos:                                      // fall through
1184   case atos:                                      // fall through
1185   case itos: mov(Otos_l1, Otos_l1->after_save());    break;        // O0 -> I0
1186   case ftos:                                      // fall through
1187   case dtos:                                      // fall through
1188   case vtos: /* nothing to do */                     break;
1189   default  : ShouldNotReachHere();
1190   }
1191 
1192 #if defined(COMPILER2) && !defined(_LP64)
1193   if (state == ltos) {
1194     // C2 expects long results in G1.  We can't tell if we're returning to interpreted
1195     // or compiled code, so to be safe use both G1 and O0/O1.
1196 
1197     // Shift bits into high (msb) of G1
1198     sllx(Otos_l1->after_save(), 32, G1);
1199     // Zero extend low bits
1200     srl (Otos_l2->after_save(), 0, Otos_l2->after_save());
1201     or3 (Otos_l2->after_save(), G1, G1);
1202   }
1203 #endif /* COMPILER2 */
1204 
1205 }
1206 #endif /* CC_INTERP */
1207 
1208 
1209 // Lock object
1210 //
1211 // Argument - lock_reg points to the BasicObjectLock to be used for locking,
1212 //            it must be initialized with the object to lock
1213 void InterpreterMacroAssembler::lock_object(Register lock_reg, Register Object) {
1214   if (UseHeavyMonitors) {
1215     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
1216   }
1217   else {
1218     Register obj_reg = Object;
1219     Register mark_reg = G4_scratch;
1220     Register temp_reg = G1_scratch;
1221     Address  lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes());
1222     Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
1223     Label    done;
1224 
1225     Label slow_case;
1226 
1227     assert_different_registers(lock_reg, obj_reg, mark_reg, temp_reg);
1228 
1229     // load markOop from object into mark_reg
1230     ld_ptr(mark_addr, mark_reg);
1231 
1232     if (UseBiasedLocking) {
1233       biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
1234     }
1235 
1236     // get the address of basicLock on stack that will be stored in the object
1237     // we need a temporary register here as we do not want to clobber lock_reg
1238     // (cas clobbers the destination register)
1239     mov(lock_reg, temp_reg);
1240     // set mark reg to be (markOop of object | UNLOCK_VALUE)
1241     or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
1242     // initialize the box  (Must happen before we update the object mark!)
1243     st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
1244     // compare and exchange object_addr, markOop | 1, stack address of basicLock
1245     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
1246     casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
1247       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
1248 
1249     // if the compare and exchange succeeded we are done (we saw an unlocked object)
1250     cmp(mark_reg, temp_reg);
1251     brx(Assembler::equal, true, Assembler::pt, done);
1252     delayed()->nop();
1253 
1254     // We did not see an unlocked object so try the fast recursive case
1255 
1256     // Check if owner is self by comparing the value in the markOop of object
1257     // with the stack pointer
1258     sub(temp_reg, SP, temp_reg);
1259 #ifdef _LP64
1260     sub(temp_reg, STACK_BIAS, temp_reg);
1261 #endif
1262     assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
1263 
1264     // Composite "andcc" test:
1265     // (a) %sp -vs- markword proximity check, and,
1266     // (b) verify mark word LSBs == 0 (Stack-locked).
1267     //
1268     // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
1269     // Note that the page size used for %sp proximity testing is arbitrary and is
1270     // unrelated to the actual MMU page size.  We use a 'logical' page size of
1271     // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
1272     // field of the andcc instruction.
1273     andcc (temp_reg, 0xFFFFF003, G0) ;
1274 
1275     // if condition is true we are done and hence we can store 0 in the displaced
1276     // header indicating it is a recursive lock and be done
1277     brx(Assembler::zero, true, Assembler::pt, done);
1278     delayed()->st_ptr(G0, lock_addr, BasicLock::displaced_header_offset_in_bytes());
1279 
1280     // none of the above fast optimizations worked so we have to get into the
1281     // slow case of monitor enter
1282     bind(slow_case);
1283     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorenter), lock_reg);
1284 
1285     bind(done);
1286   }
1287 }
1288 
1289 // Unlocks an object. Used in monitorexit bytecode and remove_activation.
1290 //
1291 // Argument - lock_reg points to the BasicObjectLock for lock
1292 // Throw IllegalMonitorStateException if object is not locked by current thread
1293 void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
1294   if (UseHeavyMonitors) {
1295     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1296   } else {
1297     Register obj_reg = G3_scratch;
1298     Register mark_reg = G4_scratch;
1299     Register displaced_header_reg = G1_scratch;
1300     Address  lockobj_addr(lock_reg, BasicObjectLock::obj_offset_in_bytes());
1301     Address  mark_addr(obj_reg, oopDesc::mark_offset_in_bytes());
1302     Label    done;
1303 
1304     if (UseBiasedLocking) {
1305       // load the object out of the BasicObjectLock
1306       ld_ptr(lockobj_addr, obj_reg);
1307       biased_locking_exit(mark_addr, mark_reg, done, true);
1308       st_ptr(G0, lockobj_addr);  // free entry
1309     }
1310 
1311     // Test first if we are in the fast recursive case
1312     Address lock_addr(lock_reg, BasicObjectLock::lock_offset_in_bytes() + BasicLock::displaced_header_offset_in_bytes());
1313     ld_ptr(lock_addr, displaced_header_reg);
1314     br_null(displaced_header_reg, true, Assembler::pn, done);
1315     delayed()->st_ptr(G0, lockobj_addr);  // free entry
1316 
1317     // See if it is still a light weight lock, if so we just unlock
1318     // the object and we are done
1319 
1320     if (!UseBiasedLocking) {
1321       // load the object out of the BasicObjectLock
1322       ld_ptr(lockobj_addr, obj_reg);
1323     }
1324 
1325     // we have the displaced header in displaced_header_reg
1326     // we expect to see the stack address of the basicLock in case the
1327     // lock is still a light weight lock (lock_reg)
1328     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
1329     casx_under_lock(mark_addr.base(), lock_reg, displaced_header_reg,
1330       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
1331     cmp(lock_reg, displaced_header_reg);
1332     brx(Assembler::equal, true, Assembler::pn, done);
1333     delayed()->st_ptr(G0, lockobj_addr);  // free entry
1334 
1335     // The lock has been converted into a heavy lock and hence
1336     // we need to get into the slow case
1337 
1338     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
1339 
1340     bind(done);
1341   }
1342 }
1343 
1344 #ifndef CC_INTERP
1345 
1346 // Get the method data pointer from the methodOop and set the
1347 // specified register to its value.
1348 
1349 void InterpreterMacroAssembler::set_method_data_pointer_offset(Register Roff) {
1350   assert(ProfileInterpreter, "must be profiling interpreter");
1351   Label get_continue;
1352 
1353   ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
1354   test_method_data_pointer(get_continue);
1355   add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
1356   if (Roff != noreg)
1357     // Roff contains a method data index ("mdi").  It defaults to zero.
1358     add(ImethodDataPtr, Roff, ImethodDataPtr);
1359   bind(get_continue);
1360 }
1361 
1362 // Set the method data pointer for the current bcp.
1363 
1364 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1365   assert(ProfileInterpreter, "must be profiling interpreter");
1366   Label zero_continue;
1367 
1368   // Test MDO to avoid the call if it is NULL.
1369   ld_ptr(Lmethod, methodOopDesc::method_data_offset(), ImethodDataPtr);
1370   test_method_data_pointer(zero_continue);
1371   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
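  // The method data index computed by bcp_to_di is returned in O0 and is
  // folded into ImethodDataPtr below.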
1372   set_method_data_pointer_offset(O0);
1373   bind(zero_continue);
1374 }
1375 
1376 // Test ImethodDataPtr.  If it is null, continue at the specified label
1377 
1378 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
1379   assert(ProfileInterpreter, "must be profiling interpreter");
1380 #ifdef _LP64
1381   bpr(Assembler::rc_z, false, Assembler::pn, ImethodDataPtr, zero_continue);
1382 #else
1383   tst(ImethodDataPtr);
1384   br(Assembler::zero, false, Assembler::pn, zero_continue);
1385 #endif
1386   delayed()->nop();
1387 }
1388 
1389 void InterpreterMacroAssembler::verify_method_data_pointer() {
1390   assert(ProfileInterpreter, "must be profiling interpreter");
1391 #ifdef ASSERT
1392   Label verify_continue;
1393   test_method_data_pointer(verify_continue);
1394 
1395   // If the mdp is valid, it will point to a DataLayout header which is
1396   // consistent with the bcp.  The converse is highly probable also.
1397   lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
1398   ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
1399   add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
1400   add(G3_scratch, O5, G3_scratch);
1401   cmp(Lbcp, G3_scratch);
1402   brx(Assembler::equal, false, Assembler::pt, verify_continue);
1403 
1404   Register temp_reg = O5;
1405   delayed()->mov(ImethodDataPtr, temp_reg);
1406   // %%% should use call_VM_leaf here?
1407   //call_VM_leaf(noreg, ..., Lmethod, Lbcp, ImethodDataPtr);
1408   save_frame_and_mov(sizeof(jdouble) / wordSize, Lmethod, O0, Lbcp, O1);
1409   Address d_save(FP, -sizeof(jdouble) + STACK_BIAS);
1410   stf(FloatRegisterImpl::D, Ftos_d, d_save);
1411   mov(temp_reg->after_save(), O2);
1412   save_thread(L7_thread_cache);
1413   call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
1414   delayed()->nop();
1415   restore_thread(L7_thread_cache);
1416   ldf(FloatRegisterImpl::D, d_save, Ftos_d);
1417   restore();
1418   bind(verify_continue);
1419 #endif // ASSERT
1420 }
1421 
1422 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
1423                                                                 Register cur_bcp,
1424                                                                 Register Rtmp,
1425                                                                 Label &profile_continue) {
1426   assert(ProfileInterpreter, "must be profiling interpreter");
1427   // Control will flow to "profile_continue" if the counter is less than the
1428   // limit or if we call profile_method()
1429 
1430   Label done;
1431 
1432   // if no method data exists, and the counter is high enough, make one
1433 #ifdef _LP64
1434   bpr(Assembler::rc_nz, false, Assembler::pn, ImethodDataPtr, done);
1435 #else
1436   tst(ImethodDataPtr);
1437   br(Assembler::notZero, false, Assembler::pn, done);
1438 #endif
1439 
1440   // Test to see if we should create a method data oop
1441   AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
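  // On 64-bit the sethi of an AddressLiteral expands to a multi-instruction
  // sequence and so cannot sit in the branch delay slot; the 32-bit form is a
  // single instruction and can.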
1442 #ifdef _LP64
1443   delayed()->nop();
1444   sethi(profile_limit, Rtmp);
1445 #else
1446   delayed()->sethi(profile_limit, Rtmp);
1447 #endif
1448   ld(Rtmp, profile_limit.low10(), Rtmp);
1449   cmp(invocation_count, Rtmp);
1450   br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
1451   delayed()->nop();
1452 
1453   // Build it now.
1454   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), cur_bcp);
1455   set_method_data_pointer_offset(O0);
1456   ba(false, profile_continue);
1457   delayed()->nop();
1458   bind(done);
1459 }
1460 
1461 // Store a value at some constant offset from the method data pointer.
1462 
1463 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
1464   assert(ProfileInterpreter, "must be profiling interpreter");
1465   st_ptr(value, ImethodDataPtr, constant);
1466 }
1467 
1468 void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
1469                                                       Register bumped_count,
1470                                                       bool decrement) {
1471   assert(ProfileInterpreter, "must be profiling interpreter");
1472 
1473   // Load the counter.
1474   ld_ptr(counter, bumped_count);
1475 
1476   if (decrement) {
1477     // Decrement the register.  Set condition codes.
1478     subcc(bumped_count, DataLayout::counter_increment, bumped_count);
1479 
1480     // If the decrement causes the counter to overflow, stay negative
1481     Label L;
1482     brx(Assembler::negative, true, Assembler::pn, L);
1483 
1484     // Store the decremented counter, if it is still negative.
1485     delayed()->st_ptr(bumped_count, counter);
1486     bind(L);
1487   } else {
1488     // Increment the register.  Set carry flag.
1489     addcc(bumped_count, DataLayout::counter_increment, bumped_count);
1490 
1491     // If the increment causes the counter to overflow, pull back by 1.
1492     assert(DataLayout::counter_increment == 1, "subc works");
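    // subc computes bumped_count - 0 - carry: if the addcc above wrapped, the
    // carry pulls the counter back so it saturates at the maximum value.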
1493     subc(bumped_count, G0, bumped_count);
1494 
1495     // Store the incremented counter.
1496     st_ptr(bumped_count, counter);
1497   }
1498 }
1499 
1500 // Increment the value at some constant offset from the method data pointer.
1501 
1502 void InterpreterMacroAssembler::increment_mdp_data_at(int constant,
1503                                                       Register bumped_count,
1504                                                       bool decrement) {
1505   // Locate the counter at a fixed offset from the mdp:
1506   Address counter(ImethodDataPtr, constant);
1507   increment_mdp_data_at(counter, bumped_count, decrement);
1508 }
1509 
1510 // Increment the value at some non-fixed (reg + constant) offset from
1511 // the method data pointer.
1512 
1513 void InterpreterMacroAssembler::increment_mdp_data_at(Register reg,
1514                                                       int constant,
1515                                                       Register bumped_count,
1516                                                       Register scratch2,
1517                                                       bool decrement) {
1518   // Add reg to the method data pointer; the constant becomes the address displacement.
1519   add(ImethodDataPtr, reg, scratch2);
1520   Address counter(scratch2, constant);
1521   increment_mdp_data_at(counter, bumped_count, decrement);
1522 }
1523 
1524 // Set a flag value at the current method data pointer position.
1525 // Updates a single byte of the header, to avoid races with other header bits.
1526 
1527 void InterpreterMacroAssembler::set_mdp_flag_at(int flag_constant,
1528                                                 Register scratch) {
1529   assert(ProfileInterpreter, "must be profiling interpreter");
1530   // Load the data header
1531   ldub(ImethodDataPtr, in_bytes(DataLayout::flags_offset()), scratch);
1532 
1533   // Set the flag
1534   or3(scratch, flag_constant, scratch);
1535 
1536   // Store the modified header.
1537   stb(scratch, ImethodDataPtr, in_bytes(DataLayout::flags_offset()));
1538 }
1539 
1540 // Test the location at some offset from the method data pointer.
1541 // If it is not equal to value, branch to the not_equal_continue Label.
1542 // Set condition codes to match the nullness of the loaded value.
1543 
1544 void InterpreterMacroAssembler::test_mdp_data_at(int offset,
1545                                                  Register value,
1546                                                  Label& not_equal_continue,
1547                                                  Register scratch) {
1548   assert(ProfileInterpreter, "must be profiling interpreter");
1549   ld_ptr(ImethodDataPtr, offset, scratch);
1550   cmp(value, scratch);
1551   brx(Assembler::notEqual, false, Assembler::pn, not_equal_continue);
1552   delayed()->tst(scratch);
1553 }
1554 
1555 // Update the method data pointer by the displacement located at some fixed
1556 // offset from the method data pointer.
1557 
1558 void InterpreterMacroAssembler::update_mdp_by_offset(int offset_of_disp,
1559                                                      Register scratch) {
1560   assert(ProfileInterpreter, "must be profiling interpreter");
1561   ld_ptr(ImethodDataPtr, offset_of_disp, scratch);
1562   add(ImethodDataPtr, scratch, ImethodDataPtr);
1563 }
1564 
1565 // Update the method data pointer by the displacement located at the
1566 // offset (reg + offset_of_disp).
1567 
1568 void InterpreterMacroAssembler::update_mdp_by_offset(Register reg,
1569                                                      int offset_of_disp,
1570                                                      Register scratch) {
1571   assert(ProfileInterpreter, "must be profiling interpreter");
1572   add(reg, offset_of_disp, scratch);
1573   ld_ptr(ImethodDataPtr, scratch, scratch);
1574   add(ImethodDataPtr, scratch, ImethodDataPtr);
1575 }
1576 
1577 // Update the method data pointer by a simple constant displacement.
1578 
1579 void InterpreterMacroAssembler::update_mdp_by_constant(int constant) {
1580   assert(ProfileInterpreter, "must be profiling interpreter");
1581   add(ImethodDataPtr, constant, ImethodDataPtr);
1582 }
1583 
1584 // Update the method data pointer for a _ret bytecode whose target
1585 // was not among our cached targets.
1586 
1587 void InterpreterMacroAssembler::update_mdp_for_ret(TosState state,
1588                                                    Register return_bci) {
1589   assert(ProfileInterpreter, "must be profiling interpreter");
1590   push(state);
1591   st_ptr(return_bci, l_tmp);  // protect return_bci, in case it is volatile
1592   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
1593   ld_ptr(l_tmp, return_bci);
1594   pop(state);
1595 }
1596 
1597 // Count a taken branch in the bytecodes.
1598 
1599 void InterpreterMacroAssembler::profile_taken_branch(Register scratch, Register bumped_count) {
1600   if (ProfileInterpreter) {
1601     Label profile_continue;
1602 
1603     // If no method data exists, go to profile_continue.
1604     test_method_data_pointer(profile_continue);
1605 
1606     // We are taking a branch.  Increment the taken count.
1607     increment_mdp_data_at(in_bytes(JumpData::taken_offset()), bumped_count);
1608 
1609     // The method data pointer needs to be updated to reflect the new target.
1610     update_mdp_by_offset(in_bytes(JumpData::displacement_offset()), scratch);
1611     bind (profile_continue);
1612   }
1613 }
1614 
1615 
1616 // Count a not-taken branch in the bytecodes.
1617 
1618 void InterpreterMacroAssembler::profile_not_taken_branch(Register scratch) {
1619   if (ProfileInterpreter) {
1620     Label profile_continue;
1621 
1622     // If no method data exists, go to profile_continue.
1623     test_method_data_pointer(profile_continue);
1624 
1625     // We are not taking the branch.  Increment the not-taken count.
1626     increment_mdp_data_at(in_bytes(BranchData::not_taken_offset()), scratch);
1627 
1628     // The method data pointer needs to be updated to correspond to the
1629     // next bytecode.
1630     update_mdp_by_constant(in_bytes(BranchData::branch_data_size()));
1631     bind (profile_continue);
1632   }
1633 }
1634 
1635 
1636 // Count a non-virtual call in the bytecodes.
1637 
1638 void InterpreterMacroAssembler::profile_call(Register scratch) {
1639   if (ProfileInterpreter) {
1640     Label profile_continue;
1641 
1642     // If no method data exists, go to profile_continue.
1643     test_method_data_pointer(profile_continue);
1644 
1645     // We are making a call.  Increment the count.
1646     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1647 
1648     // The method data pointer needs to be updated to reflect the new target.
1649     update_mdp_by_constant(in_bytes(CounterData::counter_data_size()));
1650     bind (profile_continue);
1651   }
1652 }
1653 
1654 
1655 // Count a final call in the bytecodes.
1656 
1657 void InterpreterMacroAssembler::profile_final_call(Register scratch) {
1658   if (ProfileInterpreter) {
1659     Label profile_continue;
1660 
1661     // If no method data exists, go to profile_continue.
1662     test_method_data_pointer(profile_continue);
1663 
1664     // We are making a call.  Increment the count.
1665     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1666 
1667     // The method data pointer needs to be updated to reflect the new target.
1668     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1669     bind (profile_continue);
1670   }
1671 }
1672 
1673 
1674 // Count a virtual call in the bytecodes.
1675 
1676 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1677                                                      Register scratch) {
1678   if (ProfileInterpreter) {
1679     Label profile_continue;
1680 
1681     // If no method data exists, go to profile_continue.
1682     test_method_data_pointer(profile_continue);
1683 
1684     // Record the receiver type.
1685     record_klass_in_profile(receiver, scratch, true);
1686 
1687     // The method data pointer needs to be updated to reflect the new target.
1688     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1689     bind (profile_continue);
1690   }
1691 }
1692 
1693 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1694                                         Register receiver, Register scratch,
1695                                         int start_row, Label& done, bool is_virtual_call) {
1696   if (TypeProfileWidth == 0) {
1697     if (is_virtual_call) {
1698       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1699     }
1700     return;
1701   }
1702 
1703   int last_row = VirtualCallData::row_limit() - 1;
1704   assert(start_row <= last_row, "must be work left to do");
1705   // Test this row for both the receiver and for null.
1706   // Take any of three different outcomes:
1707   //   1. found receiver => increment count and goto done
1708   //   2. found null => keep looking for case 1, maybe allocate this cell
1709   //   3. found something else => keep looking for cases 1 and 2
1710   // Case 3 is handled by a recursive call.
1711   for (int row = start_row; row <= last_row; row++) {
1712     Label next_test;
1713     bool test_for_null_also = (row == start_row);
1714 
1715     // See if the receiver is receiver[n].
1716     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1717     test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
1718     // delayed()->tst(scratch);
1719 
1720     // The receiver is receiver[n].  Increment count[n].
1721     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1722     increment_mdp_data_at(count_offset, scratch);
1723     ba(false, done);
1724     delayed()->nop();
1725     bind(next_test);
1726 
1727     if (test_for_null_also) {
1728       Label found_null;
1729       // Failed the equality check on receiver[n]...  Test for null.
1730       if (start_row == last_row) {
1731         // The only thing left to do is handle the null case.
1732         if (is_virtual_call) {
1733           brx(Assembler::zero, false, Assembler::pn, found_null);
1734           delayed()->nop();
1735           // Receiver did not match any saved receiver and there is no empty row for it.
1736           // Increment total counter to indicate polymorphic case.
1737           increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1738           ba(false, done);
1739           delayed()->nop();
1740           bind(found_null);
1741         } else {
1742           brx(Assembler::notZero, false, Assembler::pt, done);
1743           delayed()->nop();
1744         }
1745         break;
1746       }
1747       // Since null is rare, make it the branch-taken case.
1748       brx(Assembler::zero, false, Assembler::pn, found_null);
1749       delayed()->nop();
1750 
1751       // Put all the "Case 3" tests here.
1752       record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
1753 
1754       // Found a null.  Keep searching for a matching receiver,
1755       // but remember that this is an empty (unused) slot.
1756       bind(found_null);
1757     }
1758   }
1759 
1760   // In the fall-through case, we found no matching receiver, but we
1761   // observed that receiver[start_row] is NULL.
1762 
1763   // Fill in the receiver field and increment the count.
1764   int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1765   set_mdp_data_at(recvr_offset, receiver);
1766   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1767   mov(DataLayout::counter_increment, scratch);
1768   set_mdp_data_at(count_offset, scratch);
1769   if (start_row > 0) {
1770     ba(false, done);
1771     delayed()->nop();
1772   }
1773 }
1774 
1775 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1776                                                         Register scratch, bool is_virtual_call) {
1777   assert(ProfileInterpreter, "must be profiling");
1778   Label done;
1779 
1780   record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
1781 
1782   bind (done);
1783 }
1784 
1785 
1786 // Count a ret in the bytecodes.
1787 
1788 void InterpreterMacroAssembler::profile_ret(TosState state,
1789                                             Register return_bci,
1790                                             Register scratch) {
1791   if (ProfileInterpreter) {
1792     Label profile_continue;
1793     uint row;
1794 
1795     // If no method data exists, go to profile_continue.
1796     test_method_data_pointer(profile_continue);
1797 
1798     // Update the total ret count.
1799     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1800 
1801     for (row = 0; row < RetData::row_limit(); row++) {
1802       Label next_test;
1803 
1804       // See if return_bci is equal to bci[n]:
1805       test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
1806                        return_bci, next_test, scratch);
1807 
1808       // return_bci is equal to bci[n].  Increment the count.
1809       increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);
1810 
1811       // The method data pointer needs to be updated to reflect the new target.
1812       update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
1813       ba(false, profile_continue);
1814       delayed()->nop();
1815       bind(next_test);
1816     }
1817 
1818     update_mdp_for_ret(state, return_bci);
1819 
1820     bind (profile_continue);
1821   }
1822 }
1823 
1824 // Profile an unexpected null in the bytecodes.
1825 void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
1826   if (ProfileInterpreter) {
1827     Label profile_continue;
1828 
1829     // If no method data exists, go to profile_continue.
1830     test_method_data_pointer(profile_continue);
1831 
1832     set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);
1833 
1834     // The method data pointer needs to be updated.
1835     int mdp_delta = in_bytes(BitData::bit_data_size());
1836     if (TypeProfileCasts) {
1837       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1838     }
1839     update_mdp_by_constant(mdp_delta);
1840 
1841     bind (profile_continue);
1842   }
1843 }
1844 
1845 void InterpreterMacroAssembler::profile_typecheck(Register klass,
1846                                                   Register scratch) {
1847   if (ProfileInterpreter) {
1848     Label profile_continue;
1849 
1850     // If no method data exists, go to profile_continue.
1851     test_method_data_pointer(profile_continue);
1852 
1853     int mdp_delta = in_bytes(BitData::bit_data_size());
1854     if (TypeProfileCasts) {
1855       mdp_delta = in_bytes(VirtualCallData::virtual_call_data_size());
1856 
1857       // Record the object type.
1858       record_klass_in_profile(klass, scratch, false);
1859     }
1860 
1861     // The method data pointer needs to be updated.
1862     update_mdp_by_constant(mdp_delta);
1863 
1864     bind (profile_continue);
1865   }
1866 }
1867 
1868 void InterpreterMacroAssembler::profile_typecheck_failed(Register scratch) {
1869   if (ProfileInterpreter && TypeProfileCasts) {
1870     Label profile_continue;
1871 
1872     // If no method data exists, go to profile_continue.
1873     test_method_data_pointer(profile_continue);
1874 
1875     int count_offset = in_bytes(CounterData::count_offset());
1876     // Back up the address, since we have already bumped the mdp.
1877     count_offset -= in_bytes(VirtualCallData::virtual_call_data_size());
1878 
1879     // *Decrement* the counter.  We expect to see zero or small negatives.
1880     increment_mdp_data_at(count_offset, scratch, true);
1881 
1882     bind (profile_continue);
1883   }
1884 }
1885 
1886 // Count the default case of a switch construct.
1887 
1888 void InterpreterMacroAssembler::profile_switch_default(Register scratch) {
1889   if (ProfileInterpreter) {
1890     Label profile_continue;
1891 
1892     // If no method data exists, go to profile_continue.
1893     test_method_data_pointer(profile_continue);
1894 
1895     // Update the default case count
1896     increment_mdp_data_at(in_bytes(MultiBranchData::default_count_offset()),
1897                           scratch);
1898 
1899     // The method data pointer needs to be updated.
1900     update_mdp_by_offset(
1901                     in_bytes(MultiBranchData::default_displacement_offset()),
1902                     scratch);
1903 
1904     bind (profile_continue);
1905   }
1906 }
1907 
1908 // Count the index'th case of a switch construct.
1909 
1910 void InterpreterMacroAssembler::profile_switch_case(Register index,
1911                                                     Register scratch,
1912                                                     Register scratch2,
1913                                                     Register scratch3) {
1914   if (ProfileInterpreter) {
1915     Label profile_continue;
1916 
1917     // If no method data exists, go to profile_continue.
1918     test_method_data_pointer(profile_continue);
1919 
1920     // Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
1921     set(in_bytes(MultiBranchData::per_case_size()), scratch);
1922     smul(index, scratch, scratch);
1923     add(scratch, in_bytes(MultiBranchData::case_array_offset()), scratch);
1924 
1925     // Update the case count
1926     increment_mdp_data_at(scratch,
1927                           in_bytes(MultiBranchData::relative_count_offset()),
1928                           scratch2,
1929                           scratch3);
1930 
1931     // The method data pointer needs to be updated.
1932     update_mdp_by_offset(scratch,
1933                      in_bytes(MultiBranchData::relative_displacement_offset()),
1934                      scratch2);
1935 
1936     bind (profile_continue);
1937   }
1938 }
1939 
1940 // Add an InterpMonitorElem to the stack (see frame_sparc.hpp)
1941 
1942 void InterpreterMacroAssembler::add_monitor_to_stack( bool stack_is_empty,
1943                                                       Register Rtemp,
1944                                                       Register Rtemp2 ) {
1945 
1946   Register Rlimit = Lmonitors;
1947   const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1948   assert( (delta & LongAlignmentMask) == 0,
1949           "sizeof BasicObjectLock must be even number of doublewords");
1950 
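  // Make room for one more monitor: SP, the expression stack pointer (Lesp) and
  // the monitor area base (Lmonitors) all slide down by one monitor's size.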
1951   sub( SP,        delta, SP);
1952   sub( Lesp,      delta, Lesp);
1953   sub( Lmonitors, delta, Lmonitors);
1954 
1955   if (!stack_is_empty) {
1956 
1957     // must copy stack contents down
1958 
1959     Label start_copying, next;
1960 
1961     // untested("monitor stack expansion");
1962     compute_stack_base(Rtemp);
1963     ba( false, start_copying );
1964     delayed()->cmp( Rtemp, Rlimit); // done? duplicated below
1965 
1966     // note: must copy from low memory upwards
1967     // On entry to loop,
1968     // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
1969     // Loop mutates Rtemp
1970 
1971     bind( next);
1972 
1973     st_ptr(Rtemp2, Rtemp, 0);
1974     inc(Rtemp, wordSize);
1975     cmp(Rtemp, Rlimit); // are we done? (duplicated above)
1976 
1977     bind( start_copying );
1978 
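    // The annul bit makes the delay-slot load execute only when the branch back
    // to 'next' is taken, i.e. only when there is another word to copy.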
1979     brx( notEqual, true, pn, next );
1980     delayed()->ld_ptr( Rtemp, delta, Rtemp2 );
1981 
1982     // done copying stack
1983   }
1984 }
1985 
1986 // Locals
1987 #ifdef ASSERT
1988 void InterpreterMacroAssembler::verify_local_tag(frame::Tag t,
1989                                                  Register base,
1990                                                  Register scratch,
1991                                                  int n) {
1992   if (TaggedStackInterpreter) {
1993     Label ok, long_ok;
1994     // Use dst for scratch
1995     assert_different_registers(base, scratch);
1996     ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n), scratch);
1997     if (t == frame::TagCategory2) {
1998       cmp(scratch, G0);
1999       brx(Assembler::equal, false, Assembler::pt, long_ok);
2000       delayed()->ld_ptr(base, Interpreter::local_tag_offset_in_bytes(n+1), scratch);
2001       stop("local long/double tag value bad");
2002       bind(long_ok);
2003       // compare second half tag
2004       cmp(scratch, G0);
2005     } else if (t == frame::TagValue) {
2006       cmp(scratch, G0);
2007     } else {
2008       assert_different_registers(O3, base, scratch);
2009       mov(t, O3);
2010       cmp(scratch, O3);
2011     }
2012     brx(Assembler::equal, false, Assembler::pt, ok);
2013     delayed()->nop();
2014     // Also accept a zero local value, in which case the tag might
2015     // not have been set coming from deopt.
2016     ld_ptr(base, Interpreter::local_offset_in_bytes(n), scratch);
2017     cmp(scratch, G0);
2018     brx(Assembler::equal, false, Assembler::pt, ok);
2019     delayed()->nop();
2020     stop("Local tag value is bad");
2021     bind(ok);
2022   }
2023 }
2024 #endif // ASSERT
2025 
2026 void InterpreterMacroAssembler::access_local_ptr( Register index, Register dst ) {
2027   assert_not_delayed();
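  // Locals live at decreasing addresses below Llocals; scale the index by the
  // stack element size and subtract, leaving the slot's effective address in index.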
2028   sll(index, Interpreter::logStackElementSize(), index);
2029   sub(Llocals, index, index);
2030   debug_only(verify_local_tag(frame::TagReference, index, dst));
2031   ld_ptr(index, Interpreter::value_offset_in_bytes(), dst);
2032   // Note:  index must hold the effective address--the iinc template uses it
2033 }
2034 
2035 // Just like access_local_ptr but the tag is a returnAddress
2036 void InterpreterMacroAssembler::access_local_returnAddress(Register index,
2037                                                            Register dst ) {
2038   assert_not_delayed();
2039   sll(index, Interpreter::logStackElementSize(), index);
2040   sub(Llocals, index, index);
2041   debug_only(verify_local_tag(frame::TagValue, index, dst));
2042   ld_ptr(index, Interpreter::value_offset_in_bytes(), dst);
2043 }
2044 
2045 void InterpreterMacroAssembler::access_local_int( Register index, Register dst ) {
2046   assert_not_delayed();
2047   sll(index, Interpreter::logStackElementSize(), index);
2048   sub(Llocals, index, index);
2049   debug_only(verify_local_tag(frame::TagValue, index, dst));
2050   ld(index, Interpreter::value_offset_in_bytes(), dst);
2051   // Note:  index must hold the effective address--the iinc template uses it
2052 }
2053 
2054 
2055 void InterpreterMacroAssembler::access_local_long( Register index, Register dst ) {
2056   assert_not_delayed();
2057   sll(index, Interpreter::logStackElementSize(), index);
2058   sub(Llocals, index, index);
2059   debug_only(verify_local_tag(frame::TagCategory2, index, dst));
2060   // First half stored at index n+1 (which grows down from Llocals[n])
2061   load_unaligned_long(index, Interpreter::local_offset_in_bytes(1), dst);
2062 }
2063 
2064 
2065 void InterpreterMacroAssembler::access_local_float( Register index, FloatRegister dst ) {
2066   assert_not_delayed();
2067   sll(index, Interpreter::logStackElementSize(), index);
2068   sub(Llocals, index, index);
2069   debug_only(verify_local_tag(frame::TagValue, index, G1_scratch));
2070   ldf(FloatRegisterImpl::S, index, Interpreter::value_offset_in_bytes(), dst);
2071 }
2072 
2073 
2074 void InterpreterMacroAssembler::access_local_double( Register index, FloatRegister dst ) {
2075   assert_not_delayed();
2076   sll(index, Interpreter::logStackElementSize(), index);
2077   sub(Llocals, index, index);
2078   debug_only(verify_local_tag(frame::TagCategory2, index, G1_scratch));
2079   load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
2080 }
2081 
2082 
2083 #ifdef ASSERT
2084 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
2085   Label L;
2086 
2087   assert(Rindex != Rscratch, "Registers cannot be same");
2088   assert(Rindex != Rscratch1, "Registers cannot be same");
2089   assert(Rlimit != Rscratch, "Registers cannot be same");
2090   assert(Rlimit != Rscratch1, "Registers cannot be same");
2091   assert(Rscratch1 != Rscratch, "Registers cannot be same");
2092 
2093   // untested("reg area corruption");
2094   add(Rindex, offset, Rscratch);
2095   add(Rlimit, 64 + STACK_BIAS, Rscratch1);
2096   cmp(Rscratch, Rscratch1);
2097   brx(Assembler::greaterEqualUnsigned, false, pn, L);
2098   delayed()->nop();
2099   stop("regsave area is being clobbered");
2100   bind(L);
2101 }
2102 #endif // ASSERT
2103 
2104 void InterpreterMacroAssembler::tag_local(frame::Tag t,
2105                                           Register base,
2106                                           Register src,
2107                                           int n) {
2108   if (TaggedStackInterpreter) {
2109     // have to store zero because local slots can be reused (rats!)
2110     if (t == frame::TagValue) {
2111       st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n));
2112     } else if (t == frame::TagCategory2) {
2113       st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n));
2114       st_ptr(G0, base, Interpreter::local_tag_offset_in_bytes(n+1));
2115     } else {
2116       // assert that we don't stomp the value in 'src'
2117       // O3 is arbitrary because it's not used.
2118       assert_different_registers(src, base, O3);
2119       mov( t, O3);
2120       st_ptr(O3, base, Interpreter::local_tag_offset_in_bytes(n));
2121     }
2122   }
2123 }
2124 
2125 
2126 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
2127   assert_not_delayed();
2128   sll(index, Interpreter::logStackElementSize(), index);
2129   sub(Llocals, index, index);
2130   debug_only(check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);)
2131   tag_local(frame::TagValue, index, src);
2132   st(src, index, Interpreter::value_offset_in_bytes());
2133 }
2134 
2135 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src,
2136                                                  Register tag ) {
2137   assert_not_delayed();
2138   sll(index, Interpreter::logStackElementSize(), index);
2139   sub(Llocals, index, index);
2140   #ifdef ASSERT
2141   check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);
2142   #endif
2143   st_ptr(src, index, Interpreter::value_offset_in_bytes());
2144   // Store tag register directly
2145   if (TaggedStackInterpreter) {
2146     st_ptr(tag, index, Interpreter::tag_offset_in_bytes());
2147   }
2148 }
2149 
2150 
2151 
2152 void InterpreterMacroAssembler::store_local_ptr( int n, Register src,
2153                                                  Register tag ) {
2154   st_ptr(src,  Llocals, Interpreter::local_offset_in_bytes(n));
2155   if (TaggedStackInterpreter) {
2156     st_ptr(tag, Llocals, Interpreter::local_tag_offset_in_bytes(n));
2157   }
2158 }
2159 
2160 void InterpreterMacroAssembler::store_local_long( Register index, Register src ) {
2161   assert_not_delayed();
2162   sll(index, Interpreter::logStackElementSize(), index);
2163   sub(Llocals, index, index);
2164   #ifdef ASSERT
2165   check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
2166   #endif
2167   tag_local(frame::TagCategory2, index, src);
2168   store_unaligned_long(src, index, Interpreter::local_offset_in_bytes(1)); // which is n+1
2169 }
2170 
2171 
2172 void InterpreterMacroAssembler::store_local_float( Register index, FloatRegister src ) {
2173   assert_not_delayed();
2174   sll(index, Interpreter::logStackElementSize(), index);
2175   sub(Llocals, index, index);
2176   #ifdef ASSERT
2177   check_for_regarea_stomp(index, Interpreter::value_offset_in_bytes(), FP, G1_scratch, G4_scratch);
2178   #endif
2179   tag_local(frame::TagValue, index, G1_scratch);
2180   stf(FloatRegisterImpl::S, src, index, Interpreter::value_offset_in_bytes());
2181 }
2182 
2183 
2184 void InterpreterMacroAssembler::store_local_double( Register index, FloatRegister src ) {
2185   assert_not_delayed();
2186   sll(index, Interpreter::logStackElementSize(), index);
2187   sub(Llocals, index, index);
2188   #ifdef ASSERT
2189   check_for_regarea_stomp(index, Interpreter::local_offset_in_bytes(1), FP, G1_scratch, G4_scratch);
2190   #endif
2191   tag_local(frame::TagCategory2, index, G1_scratch);
2192   store_unaligned_double(src, index, Interpreter::local_offset_in_bytes(1));
2193 }
2194 
2195 
2196 int InterpreterMacroAssembler::top_most_monitor_byte_offset() {
2197   const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
2198   int rounded_vm_local_words = ::round_to(frame::interpreter_frame_vm_local_words, WordsPerLong);
2199   return ((-rounded_vm_local_words * wordSize) - delta ) + STACK_BIAS;
2200 }
2201 
2202 
2203 Address InterpreterMacroAssembler::top_most_monitor() {
2204   return Address(FP, top_most_monitor_byte_offset());
2205 }
2206 
2207 
2208 void InterpreterMacroAssembler::compute_stack_base( Register Rdest ) {
2209   add( Lesp,      wordSize,                                    Rdest );
2210 }
2211 
2212 #endif /* CC_INTERP */
2213 
2214 void InterpreterMacroAssembler::increment_invocation_counter( Register Rtmp, Register Rtmp2 ) {
2215   assert(UseCompiler, "incrementing must be useful");
2216 #ifdef CC_INTERP
2217   Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
2218                                  InvocationCounter::counter_offset());
2219   Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
2220                                  InvocationCounter::counter_offset());
2221 #else
2222   Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
2223                                InvocationCounter::counter_offset());
2224   Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
2225                                InvocationCounter::counter_offset());
2226 #endif /* CC_INTERP */
2227   int delta = InvocationCounter::count_increment;
2228 
2229   // Load each counter in a register
2230   ld( inv_counter, Rtmp );
2231   ld( be_counter, Rtmp2 );
2232 
2233   assert( is_simm13( delta ), " delta too large.");
2234 
2235   // Add the delta to the invocation counter and store the result
2236   add( Rtmp, delta, Rtmp );
2237 
2238   // Mask the backedge counter
2239   and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
2240 
2241   // Store value
2242   st( Rtmp, inv_counter);
2243 
2244   // Add invocation counter + backedge counter
2245   add( Rtmp, Rtmp2, Rtmp);
2246 
2247   // Note that this macro must leave the backedge_count + invocation_count in Rtmp!
2248 }
2249 
2250 void InterpreterMacroAssembler::increment_backedge_counter( Register Rtmp, Register Rtmp2 ) {
2251   assert(UseCompiler, "incrementing must be useful");
2252 #ifdef CC_INTERP
2253   Address be_counter (G5_method, methodOopDesc::backedge_counter_offset() +
2254                                  InvocationCounter::counter_offset());
2255   Address inv_counter(G5_method, methodOopDesc::invocation_counter_offset() +
2256                                  InvocationCounter::counter_offset());
2257 #else
2258   Address be_counter (Lmethod, methodOopDesc::backedge_counter_offset() +
2259                                InvocationCounter::counter_offset());
2260   Address inv_counter(Lmethod, methodOopDesc::invocation_counter_offset() +
2261                                InvocationCounter::counter_offset());
2262 #endif /* CC_INTERP */
2263   int delta = InvocationCounter::count_increment;
2264   // Load each counter in a register
2265   ld( be_counter, Rtmp );
2266   ld( inv_counter, Rtmp2 );
2267 
2268   // Add the delta to the backedge counter
2269   add( Rtmp, delta, Rtmp );
2270 
2271   // Mask the invocation counter, add to backedge counter
2272   and3( Rtmp2, InvocationCounter::count_mask_value, Rtmp2 );
2273 
2274   // and store the result to memory
2275   st( Rtmp, be_counter );
2276 
2277   // Add backedge + invocation counter
2278   add( Rtmp, Rtmp2, Rtmp );
2279 
2280   // Note that this macro must leave backedge_count + invocation_count in Rtmp!
2281 }
2282 
2283 #ifndef CC_INTERP
2284 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
2285                                                              Register branch_bcp,
2286                                                              Register Rtmp ) {
2287   Label did_not_overflow;
2288   Label overflow_with_error;
2289   assert_different_registers(backedge_count, Rtmp, branch_bcp);
2290   assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
2291 
2292   AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
2293   load_contents(limit, Rtmp);
2294   cmp(backedge_count, Rtmp);
2295   br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow);
2296   delayed()->nop();
2297 
2298   // When ProfileInterpreter is on, the backedge_count comes from the
2299   // methodDataOop, whose value does not get reset on the call to
2300   // frequency_counter_overflow().  To avoid excessive calls to the overflow
2301   // routine while the method is being compiled, add a second test to make sure
2302   // the overflow function is called only once every overflow_frequency.
2303   if (ProfileInterpreter) {
2304     const int overflow_frequency = 1024;
2305     andcc(backedge_count, overflow_frequency-1, Rtmp);
2306     brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
2307     delayed()->nop();
2308   }
2309 
2310   // overflow in loop, pass branch bytecode
2311   set(6,Rtmp);
2312   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
2313 
2314   // Was an OSR adapter generated?
2315   // O0 = osr nmethod
2316   tst(O0);
2317   brx(Assembler::zero, false, Assembler::pn, overflow_with_error);
2318   delayed()->nop();
2319 
2320   // Has the nmethod been invalidated already?
2321   ld(O0, nmethod::entry_bci_offset(), O2);
2322   cmp(O2, InvalidOSREntryBci);
2323   br(Assembler::equal, false, Assembler::pn, overflow_with_error);
2324   delayed()->nop();
2325 
2326   // migrate the interpreter frame off of the stack
2327 
2328   mov(G2_thread, L7);
2329   // save nmethod
2330   mov(O0, L6);
2331   set_last_Java_frame(SP, noreg);
2332   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
2333   reset_last_Java_frame();
2334   mov(L7, G2_thread);
2335 
2336   // move OSR nmethod to I1
2337   mov(L6, I1);
2338 
2339   // OSR buffer to I0
2340   mov(O0, I0);
2341 
2342   // remove the interpreter frame
2343   restore(I5_savedSP, 0, SP);
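  // The restore popped the register window, so the I0/I1 set up above are now
  // visible here as O0 (the OSR buffer) and O1 (the OSR nmethod).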
2344 
2345   // Jump to the osr code.
2346   ld_ptr(O1, nmethod::osr_entry_point_offset(), O2);
2347   jmp(O2, G0);
2348   delayed()->nop();
2349 
2350   bind(overflow_with_error);
2351 
2352   bind(did_not_overflow);
2353 }
2354 
2355 
2356 
2357 void InterpreterMacroAssembler::interp_verify_oop(Register reg, TosState state, const char * file, int line) {
2358   if (state == atos) { MacroAssembler::_verify_oop(reg, "broken oop ", file, line); }
2359 }
2360 
2361 
2362 // local helper function for the verify_oop_or_return_address macro
2363 static bool verify_return_address(methodOopDesc* m, int bci) {
2364 #ifndef PRODUCT
2365   address pc = (address)(m->constMethod())
2366              + in_bytes(constMethodOopDesc::codes_offset()) + bci;
2367   // assume it is a valid return address if it is inside m and is preceded by a jsr
2368   if (!m->contains(pc))                                          return false;
2369   address jsr_pc;
2370   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2371   if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())    return true;
2372   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2373   if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())    return true;
2374 #endif // PRODUCT
2375   return false;
2376 }
2377 
2378 
2379 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2380   if (!VerifyOops)  return;
2381   // the VM documentation for the astore[_wide] bytecode allows
2382   // the TOS to be not only an oop but also a return address
2383   Label test;
2384   Label skip;
2385   // See if it is an address (in the current method):
2386 
2387   mov(reg, Rtmp);
2388   const int log2_bytecode_size_limit = 16;
2389   srl(Rtmp, log2_bytecode_size_limit, Rtmp);
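  // If any bits survive shifting out the low 16, the value is too large to be a
  // bytecode offset in this method, so branch to 'test' and verify it as an oop.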
2390   br_notnull( Rtmp, false, pt, test );
2391   delayed()->nop();
2392 
2393   // %%% should use call_VM_leaf here?
2394   save_frame_and_mov(0, Lmethod, O0, reg, O1);
2395   save_thread(L7_thread_cache);
2396   call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
2397   delayed()->nop();
2398   restore_thread(L7_thread_cache);
2399   br_notnull( O0, false, pt, skip );
2400   delayed()->restore();
2401 
2402   // Perform a more elaborate out-of-line call
2403   // Not an address; verify it:
2404   bind(test);
2405   verify_oop(reg);
2406   bind(skip);
2407 }
2408 
2409 
2410 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2411   if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);
2412 }
2413 #endif /* CC_INTERP */
2414 
2415 // Inline assembly for:
2416 //
2417 // if (thread is in interp_only_mode) {
2418 //   InterpreterRuntime::post_method_entry();
2419 // }
2420 // if (DTraceMethodProbes) {
2421 //   SharedRuntime::dtrace_method_entry(method, receiver);
2422 // }
2423 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2424 //   SharedRuntime::rc_trace_method_entry(method, receiver);
2425 // }
2426 
2427 void InterpreterMacroAssembler::notify_method_entry() {
2428 
2429   // C++ interpreter only uses this for native methods.
2430 
2431   // Whenever JVMTI puts a thread in interp_only_mode, method
2432   // entry/exit events are sent for that thread to track stack
2433   // depth.  If it is possible to enter interp_only_mode we add
2434   // the code to check if the event should be sent.
2435   if (JvmtiExport::can_post_interpreter_events()) {
2436     Label L;
2437     Register temp_reg = O5;
2438     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2439     ld(interp_only, temp_reg);
2440     tst(temp_reg);
2441     br(zero, false, pt, L);
2442     delayed()->nop();
2443     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2444     bind(L);
2445   }
2446 
2447   {
2448     Register temp_reg = O5;
2449     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2450     call_VM_leaf(noreg,
2451       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2452       G2_thread, Lmethod);
2453   }
2454 
2455   // RedefineClasses() tracing support for obsolete method entry
2456   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2457     call_VM_leaf(noreg,
2458       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2459       G2_thread, Lmethod);
2460   }
2461 }
2462 
2463 
2464 // Inline assembly for:
2465 //
2466 // if (thread is in interp_only_mode) {
2467 //   // save result
2468 //   InterpreterRuntime::post_method_exit();
2469 //   // restore result
2470 // }
2471 // if (DTraceMethodProbes) {
2472 //   SharedRuntime::dtrace_method_exit(thread, method);
2473 // }
2474 //
2475 // Native methods have their result stored in d_tmp and l_tmp
2476 // Java methods have their result stored in the expression stack
2477 
2478 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2479                                                    TosState state,
2480                                                    NotifyMethodExitMode mode) {
2481   // C++ interpreter only uses this for native methods.
2482 
2483   // Whenever JVMTI puts a thread in interp_only_mode, method
2484   // entry/exit events are sent for that thread to track stack
2485   // depth.  If it is possible to enter interp_only_mode we add
2486   // the code to check if the event should be sent.
2487   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2488     Label L;
2489     Register temp_reg = O5;
2490     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2491     ld(interp_only, temp_reg);
2492     tst(temp_reg);
2493     br(zero, false, pt, L);
2494     delayed()->nop();
2495 
2496     // Note: frame::interpreter_frame_result has a dependency on how the
2497     // method result is saved across the call to post_method_exit. For
2498     // native methods it assumes the result registers are saved to
2499     // l_scratch and d_scratch. If this changes then the interpreter_frame_result
2500     // implementation will need to be updated too.
2501 
2502     save_return_value(state, is_native_method);
2503     call_VM(noreg,
2504             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
2505     restore_return_value(state, is_native_method);
2506     bind(L);
2507   }
2508 
2509   {
2510     Register temp_reg = O5;
2511     // Dtrace notification
2512     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2513     save_return_value(state, is_native_method);
2514     call_VM_leaf(
2515       noreg,
2516       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
2517       G2_thread, Lmethod);
2518     restore_return_value(state, is_native_method);
2519   }
2520 }
2521 
2522 void InterpreterMacroAssembler::save_return_value(TosState state, bool is_native_call) {
2523 #ifdef CC_INTERP
2524   // result potentially in O0/O1: save it across calls
2525   stf(FloatRegisterImpl::D, F0, STATE(_native_fresult));
2526 #ifdef _LP64
2527   stx(O0, STATE(_native_lresult));
2528 #else
2529   std(O0, STATE(_native_lresult));
2530 #endif
2531 #else // CC_INTERP
2532   if (is_native_call) {
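    // A native result is returned in F0 and O0 (O0/O1 on 32-bit); park it in the
    // frame's d_tmp / l_tmp scratch slots so it survives the runtime call.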
2533     stf(FloatRegisterImpl::D, F0, d_tmp);
2534 #ifdef _LP64
2535     stx(O0, l_tmp);
2536 #else
2537     std(O0, l_tmp);
2538 #endif
2539   } else {
2540     push(state);
2541   }
2542 #endif // CC_INTERP
2543 }
2544 
2545 void InterpreterMacroAssembler::restore_return_value( TosState state, bool is_native_call) {
2546 #ifdef CC_INTERP
2547   ldf(FloatRegisterImpl::D, STATE(_native_fresult), F0);
2548 #ifdef _LP64
2549   ldx(STATE(_native_lresult), O0);
2550 #else
2551   ldd(STATE(_native_lresult), O0);
2552 #endif
2553 #else // CC_INTERP
2554   if (is_native_call) {
2555     ldf(FloatRegisterImpl::D, d_tmp, F0);
2556 #ifdef _LP64
2557     ldx(l_tmp, O0);
2558 #else
2559     ldd(l_tmp, O0);
2560 #endif
2561   } else {
2562     pop(state);
2563   }
2564 #endif // CC_INTERP
2565 }