src/cpu/sparc/vm/interp_masm_sparc.cpp

(old version)

 219   case stos:                                           // fall through
 220   case itos: ld(val_addr, Otos_l1);                       break;
 221   case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
 222   case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
 223   case vtos: /* nothing to do */                          break;
 224   default  : ShouldNotReachHere();
 225   }
 226   // Clean up tos value in the jvmti thread state
 227   or3(G0, ilgl, G3_scratch);
 228   stw(G3_scratch, tos_addr);
 229   st_long(G0, val_addr);
 230   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 231 }
 232 
 233 
 234 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
 235   if (JvmtiExport::can_force_early_return()) {
 236     Label L;
 237     Register thr_state = G3_scratch;
 238     ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
 239     br_null(thr_state, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
 240 
 241     // Initiate earlyret handling only if it is not already being processed.
 242     // If the flag has the earlyret_processing bit set, it means that this code
 243     // is called *during* earlyret handling - we don't want to reenter.
 244     ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
 245     cmp_and_br(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, false, pt, L);
 246 
 247     // Call Interpreter::remove_activation_early_entry() to get the address of the
 248     // same-named entrypoint in the generated interpreter code
 249     ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
 250     call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
 251 
 252     // Jump to Interpreter::_remove_activation_early_entry
 253     jmpl(O0, G0, G0);
 254     delayed()->nop();
 255     bind(L);
 256   }
 257 }
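
The control flow emitted above, restated as plain C++ (a sketch only; the accessor names are assumed from the offsets the code loads, and goto_entry is a stand-in for the jmpl):

    // Hypothetical high-level equivalent of check_and_handle_earlyret.
    JvmtiThreadState* ts = thread->jvmti_thread_state();
    if (ts != NULL && ts->earlyret_state() == JvmtiThreadState::earlyret_pending) {
      // Ask the VM for the interpreter entry that removes the activation,
      // selected by the pending tos state, then jump to it (no return).
      address entry = Interpreter::remove_activation_early_entry((TosState)ts->earlyret_tos());
      goto_entry(entry);   // corresponds to jmpl O0, G0, G0 above
    }
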
 258 
 259 
 260 void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
 261   mov(arg_1, O0);
 262   mov(arg_2, O1);
 263   MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
 264 }
 265 #endif /* CC_INTERP */


 545 
 546 
 547 #ifdef ASSERT
 548 void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
 549   Label Bad, OK;
 550 
 551   // Saved SP must be aligned.
 552 #ifdef _LP64
 553   btst(2*BytesPerWord-1, Rsp);
 554 #else
 555   btst(LongAlignmentMask, Rsp);
 556 #endif
 557   br(Assembler::notZero, false, Assembler::pn, Bad);
 558   delayed()->nop();
 559 
 560   // Saved SP, plus register window size, must not be above FP.
 561   add(Rsp, frame::register_save_words * wordSize, Rtemp);
 562 #ifdef _LP64
 563   sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
 564 #endif
 565   cmp_and_brx(Rtemp, FP, Assembler::greaterUnsigned, false, Assembler::pn, Bad);
 566 
 567   // Saved SP must not be ridiculously below current SP.
 568   size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
 569   set(maxstack, Rtemp);
 570   sub(SP, Rtemp, Rtemp);
 571 #ifdef _LP64
 572   add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
 573 #endif
 574   cmp_and_brx(Rsp, Rtemp, Assembler::lessUnsigned, false, Assembler::pn, Bad);
 575 
 576   ba(OK);
 577 
 578   bind(Bad);
 579   stop("on return to interpreted call, restored SP is corrupted");
 580 
 581   bind(OK);
 582 }
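
The three checks above, restated as C-style assertions (a sketch of the _LP64 flavor; the generated code branches to Bad instead of asserting, and STACK_BIAS is the SPARC V9 bias carried by %sp and %fp):

    // 1. The saved SP must be doubleword aligned.
    assert((Rsp & (2 * BytesPerWord - 1)) == 0, "misaligned saved SP");
    // 2. Saved SP plus the register save area must not rise above FP.
    assert(Rsp + frame::register_save_words * wordSize - STACK_BIAS <= FP, "above FP");
    // 3. Saved SP must not be ridiculously far below the current SP.
    assert(Rsp >= (SP - maxstack) + STACK_BIAS, "too far below SP");
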
 583 
 584 
 585 void InterpreterMacroAssembler::verify_esp(Register Resp) {
 586   // about to read or write Resp[0]
 587   // make sure it is not in the monitors or the register save area
 588   Label OK1, OK2;
 589 
 590   cmp(Resp, Lmonitors);
 591   brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
 592   delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
 593   stop("too many pops:  Lesp points into monitor area");
 594   bind(OK1);
 595 #ifdef _LP64
 596   sub(Resp, STACK_BIAS, Resp);


 604 #endif // ASSERT
 605 
 606 // Load compiled (i2c) or interpreter entry when calling from interpreted and
 607 // do the call. Centralized so that all interpreter calls will do the same actions.
 608 // If jvmti single stepping is on for a thread we must not call compiled code.
 609 void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {
 610 
 611   // Assume we want to go compiled if available
 612 
 613   ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
 614 
 615   if (JvmtiExport::can_post_interpreter_events()) {
 616     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 617     // compiled code in threads for which the event is enabled.  Check here for
 618     // interp_only_mode if these events CAN be enabled.
 619     verify_thread();
 620     Label skip_compiled_code;
 621 
 622     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
 623     ld(interp_only, scratch);
 624     tst(scratch);
 625     br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
 626     delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
 627     bind(skip_compiled_code);
 628   }
 629 
 630   // the i2c_adapters need methodOop in G5_method (right? %%%)
 631   // do the call
 632 #ifdef ASSERT
 633   {
 634     Label ok;
 635     br_notnull(target, false, Assembler::pt, ok);
 636     stop("null entry point");
 637     bind(ok);
 638   }
 639 #endif // ASSERT
 640 
 641   // Adjust Rret first so Llast_SP can be same as Rret
 642   add(Rret, -frame::pc_return_offset, O7);
 643   add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
 644   // Record SP so we can remove any stack space allocated by adapter transition
 645   jmp(target, 0);
 646   delayed()->mov(SP, Llast_SP);
 647 }
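
The dispatch decision above, as a sketch in plain C++ (accessors assumed from the offsets loaded; the real code folds the fallback load into the branch delay slot):

    // Prefer the compiled (from-interpreted) entry; fall back to the
    // interpreter entry when JVMTI has the thread in interp-only mode.
    address target = method->from_interpreted_entry();
    if (JvmtiExport::can_post_interpreter_events() && thread->interp_only_mode()) {
      target = method->interpreter_entry();   // never run compiled code here
    }
    // ...then jump to target with O7, Gargs and Llast_SP set up as above.
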
 648 
 649 void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
 650   assert_not_delayed();
 651 
 652   Label not_taken;
 653   if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
 654   else             br (cc, false, Assembler::pn, not_taken);
 655   delayed()->nop();


 955                                                               bool install_monitor_exception) {
 956   Label unlocked, unlock, no_unlock;
 957 
 958   // get the value of _do_not_unlock_if_synchronized into G1_scratch
 959   const Address do_not_unlock_if_synchronized(G2_thread,
 960     JavaThread::do_not_unlock_if_synchronized_offset());
 961   ldbool(do_not_unlock_if_synchronized, G1_scratch);
 962   stbool(G0, do_not_unlock_if_synchronized); // reset the flag
 963 
 964   // check if synchronized method
 965   const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
 966   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 967   push(state); // save tos
 968   ld(access_flags, G3_scratch); // Load access flags.
 969   btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
 970   br(zero, false, pt, unlocked);
 971   delayed()->nop();
 972 
 973   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
 974   // is set.
 975   tst(G1_scratch);
 976   br(Assembler::notZero, false, pn, no_unlock);
 977   delayed()->nop();
 978 
 979   // BasicObjectLock will be first in list, since this is a synchronized method. However, need
 980   // to check that the object has not been unlocked by an explicit monitorexit bytecode.
 981 
 982   //Intel: if (throw_monitor_exception) ... else ...
 983   // Entry already unlocked, need to throw exception
 984   //...
 985 
 986   // pass top-most monitor elem
 987   add( top_most_monitor(), O1 );
 988 
 989   ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
 990   br_notnull(G3_scratch, false, pt, unlock);
 991 
 992   if (throw_monitor_exception) {
 993     // Entry already unlocked need to throw an exception
 994     MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
 995     should_not_reach_here();
 996   } else {
 997     // Monitor already unlocked during a stack unroll.
 998     // If requested, install an illegal_monitor_state_exception.
 999     // Continue with stack unrolling.
1000     if (install_monitor_exception) {
1001       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1002     }
1003     ba(unlocked);
1004   }
1005 
1006   bind(unlock);
1007 
1008   unlock_object(O1);
1009 
1010   bind(unlocked);
1011 
1012   // I0, I1: Might contain return value
1013 
1014   // Check that all monitors are unlocked
1015   { Label loop, exception, entry, restart;
1016 
1017     Register Rmptr   = O0;
1018     Register Rtemp   = O1;
1019     Register Rlimit  = Lmonitors;
1020     const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1021     assert( (delta & LongAlignmentMask) == 0,
1022             "sizeof BasicObjectLock must be even number of doublewords");
1023 
1024     #ifdef ASSERT
1025     add(top_most_monitor(), Rmptr, delta);
1026     { Label L;
1027       // ensure that Rmptr starts out above (or at) Rlimit
1028       cmp_and_brx(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, false, pn, L);
1029       stop("monitor stack has negative size");
1030       bind(L);
1031     }
1032     #endif
1033     bind(restart);
1034     ba(entry, false);
1035     delayed()->
1036     add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry
1037 
1038     // Entry is still locked, need to throw exception
1039     bind(exception);
1040     if (throw_monitor_exception) {
1041       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1042       should_not_reach_here();
1043     } else {
1044       // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
1045       // Unlock does not block, so don't have to worry about the frame
1046       unlock_object(Rmptr);
1047       if (install_monitor_exception) {
1048         MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1049       }
1050       ba(restart);
1051     }
1052 
1053     bind(loop);
1054     cmp(Rtemp, G0);                             // check if current entry is used
1055     brx(Assembler::notEqual, false, pn, exception);
1056     delayed()->
1057     dec(Rmptr, delta);                          // otherwise advance to next entry
1058     #ifdef ASSERT
1059     { Label L;
1060       // ensure that Rmptr has not somehow stepped below Rlimit
1061       cmp_and_brx(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, false, pn, L);
1062       stop("ran off the end of the monitor stack");
1063       bind(L);
1064     }
1065     #endif
1066     bind(entry);
1067     cmp(Rmptr, Rlimit);                         // check if bottom reached
1068     brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
1069     delayed()->
1070     ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
1071   }
1072 
1073   bind(no_unlock);
1074   pop(state);
1075   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
1076 }
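
The monitor sweep above, restated iteratively (a sketch; next_entry and the throw/install helpers are stand-ins, with the entry layout and loop direction taken from the offsets and comments in the code):

    // Walk every monitor between the bottom-most entry and Lmonitors; an
    // entry whose obj field is still non-NULL was never unlocked.
    for (BasicObjectLock* m = bottom_most_monitor; m != (BasicObjectLock*)Lmonitors;
         m = next_entry(m)) {                        // step by delta bytes
      if (m->obj() != NULL) {
        if (throw_monitor_exception) {
          throw_illegal_monitor_state_exception();   // does not return
        } else {
          unlock_object(m);                          // then restart the sweep
          if (install_monitor_exception) install_illegal_monitor_state_exception();
        }
      }
    }
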
1077 
1078 
1079 // remove activation
1080 //
1081 // Unlock the receiver if this is a synchronized method.


1162     ld_ptr(mark_addr, mark_reg);
1163 
1164     if (UseBiasedLocking) {
1165       biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
1166     }
1167 
1168     // get the address of basicLock on stack that will be stored in the object
1169     // we need a temporary register here as we do not want to clobber lock_reg
1170     // (cas clobbers the destination register)
1171     mov(lock_reg, temp_reg);
1172     // set mark reg to be (markOop of object | UNLOCK_VALUE)
1173     or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
1174     // initialize the box  (Must happen before we update the object mark!)
1175     st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
1176     // compare and exchange object_addr, markOop | 1, stack address of basicLock
1177     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
1178     casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
1179       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
1180 
1181     // if the compare and exchange succeeded we are done (we saw an unlocked object)
1182     cmp_and_brx(mark_reg, temp_reg, Assembler::equal, true, Assembler::pt, done);
1183 
1184     // We did not see an unlocked object so try the fast recursive case
1185 
1186     // Check if owner is self by comparing the value in the markOop of object
1187     // with the stack pointer
1188     sub(temp_reg, SP, temp_reg);
1189 #ifdef _LP64
1190     sub(temp_reg, STACK_BIAS, temp_reg);
1191 #endif
1192     assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
1193 
1194     // Composite "andcc" test:
1195     // (a) %sp -vs- markword proximity check, and,
1196     // (b) verify mark word LSBs == 0 (Stack-locked).
1197     //
1198     // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
1199     // Note that the page size used for %sp proximity testing is arbitrary and is
1200     // unrelated to the actual MMU page size.  We use a 'logical' page size of
1201     // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
1202     // field of the andcc instruction.


1288 
1289 // Set the method data pointer for the current bcp.
1290 
1291 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1292   assert(ProfileInterpreter, "must be profiling interpreter");
1293   Label zero_continue;
1294 
1295   // Test MDO to avoid the call if it is NULL.
1296   ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
1297   test_method_data_pointer(zero_continue);
1298   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
1299   add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
1300   add(ImethodDataPtr, O0, ImethodDataPtr);
1301   bind(zero_continue);
1302 }
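
In effect (a sketch; bcp_to_di returns its result in O0, which the code then adds into ImethodDataPtr):

    // Conceptual computation of the method data pointer.
    methodDataOop mdo = method->method_data();
    if (mdo != NULL) {
      ImethodDataPtr = (address)mdo
                     + in_bytes(methodDataOopDesc::data_offset())
                     + InterpreterRuntime::bcp_to_di(method, bcp);
    }
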
1303 
1304 // Test ImethodDataPtr.  If it is null, continue at the specified label
1305 
1306 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
1307   assert(ProfileInterpreter, "must be profiling interpreter");
1308   br_null(ImethodDataPtr, false, Assembler::pn, zero_continue);
1309 }
1310 
1311 void InterpreterMacroAssembler::verify_method_data_pointer() {
1312   assert(ProfileInterpreter, "must be profiling interpreter");
1313 #ifdef ASSERT
1314   Label verify_continue;
1315   test_method_data_pointer(verify_continue);
1316 
1317   // If the mdp is valid, it will point to a DataLayout header which is
1318   // consistent with the bcp.  The converse is highly probable also.
1319   lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
1320   ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
1321   add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
1322   add(G3_scratch, O5, G3_scratch);
1323   cmp(Lbcp, G3_scratch);
1324   brx(Assembler::equal, false, Assembler::pt, verify_continue);
1325 
1326   Register temp_reg = O5;
1327   delayed()->mov(ImethodDataPtr, temp_reg);
1328   // %%% should use call_VM_leaf here?


1334   save_thread(L7_thread_cache);
1335   call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
1336   delayed()->nop();
1337   restore_thread(L7_thread_cache);
1338   ldf(FloatRegisterImpl::D, d_save, Ftos_d);
1339   restore();
1340   bind(verify_continue);
1341 #endif // ASSERT
1342 }
1343 
1344 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
1345                                                                 Register Rtmp,
1346                                                                 Label &profile_continue) {
1347   assert(ProfileInterpreter, "must be profiling interpreter");
1348   // Control will flow to "profile_continue" if the counter is less than the
1349   // limit or if we call profile_method()
1350 
1351   Label done;
1352 
1353   // if no method data exists, and the counter is high enough, make one
1354   br_notnull(ImethodDataPtr, false, Assembler::pn, done);
1355 
1356   // Test to see if we should create a method data oop
1357   AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
1358   sethi(profile_limit, Rtmp);
1359   ld(Rtmp, profile_limit.low10(), Rtmp);
1360   cmp_and_br(invocation_count, Rtmp, Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
1361 
1362   // Build it now.
1363   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1364   set_method_data_pointer_for_bcp();
1365   ba(profile_continue);
1366   bind(done);
1367 }
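
Restated as a sketch (control flow only; the elided profile_method arguments are left as in the call above):

    // Build the MDO only once the method is hot enough.
    if (ImethodDataPtr == NULL) {
      if (invocation_count >= InvocationCounter::InterpreterProfileLimit) {
        InterpreterRuntime::profile_method(...);   // allocate the MDO now
        set_method_data_pointer_for_bcp();
      }
      // control continues at profile_continue in both sub-cases
    }
    // otherwise fall through to done: profiling data already exists
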
1368 
1369 // Store a value at some constant offset from the method data pointer.
1370 
1371 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
1372   assert(ProfileInterpreter, "must be profiling interpreter");
1373   st_ptr(value, ImethodDataPtr, constant);
1374 }
1375 
1376 void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
1377                                                       Register bumped_count,
1378                                                       bool decrement) {
1379   assert(ProfileInterpreter, "must be profiling interpreter");
1380 
1381   // Load the counter.
1382   ld_ptr(counter, bumped_count);
1383 
1384   if (decrement) {
1385     // Decrement the register.  Set condition codes.


1577     bind (profile_continue);
1578   }
1579 }
1580 
1581 
1582 // Count a virtual call in the bytecodes.
1583 
1584 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1585                                                      Register scratch,
1586                                                      bool receiver_can_be_null) {
1587   if (ProfileInterpreter) {
1588     Label profile_continue;
1589 
1590     // If no method data exists, go to profile_continue.
1591     test_method_data_pointer(profile_continue);
1592 
1593 
1594     Label skip_receiver_profile;
1595     if (receiver_can_be_null) {
1596       Label not_null;
1597       br_notnull(receiver, false, Assembler::pt, not_null);
1598       // We are making a call.  Increment the count for null receiver.
1599       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1600       ba(skip_receiver_profile);
1601       bind(not_null);
1602     }
1603 
1604     // Record the receiver type.
1605     record_klass_in_profile(receiver, scratch, true);
1606     bind(skip_receiver_profile);
1607 
1608     // The method data pointer needs to be updated to reflect the new target.
1609     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1610     bind (profile_continue);
1611   }
1612 }
1613 
1614 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1615                                         Register receiver, Register scratch,
1616                                         int start_row, Label& done, bool is_virtual_call) {
1617   if (TypeProfileWidth == 0) {
1618     if (is_virtual_call) {
1619       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1620     }


1624   int last_row = VirtualCallData::row_limit() - 1;
1625   assert(start_row <= last_row, "must be work left to do");
1626   // Test this row for both the receiver and for null.
1627   // Take any of three different outcomes:
1628   //   1. found receiver => increment count and goto done
1629   //   2. found null => keep looking for case 1, maybe allocate this cell
1630   //   3. found something else => keep looking for cases 1 and 2
1631   // Case 3 is handled by a recursive call.
1632   for (int row = start_row; row <= last_row; row++) {
1633     Label next_test;
1634     bool test_for_null_also = (row == start_row);
1635 
1636     // See if the receiver is receiver[n].
1637     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1638     test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
1639     // delayed()->tst(scratch);
1640 
1641     // The receiver is receiver[n].  Increment count[n].
1642     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1643     increment_mdp_data_at(count_offset, scratch);
1644     ba(done);
1645     bind(next_test);
1646 
1647     if (test_for_null_also) {
1648       Label found_null;
1649       // Failed the equality check on receiver[n]...  Test for null.
1650       if (start_row == last_row) {
1651         // The only thing left to do is handle the null case.
1652         if (is_virtual_call) {
1653           brx(Assembler::zero, false, Assembler::pn, found_null);
1654           delayed()->nop();
1655           // Receiver did not match any saved receiver and there is no empty row for it.
1656           // Increment total counter to indicate polymorphic case.
1657           increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1658           ba(done);
1659           bind(found_null);
1660         } else {
1661           brx(Assembler::notZero, false, Assembler::pt, done);
1662           delayed()->nop();
1663         }
1664         break;
1665       }
1666       // Since null is rare, make it be the branch-taken case.
1667       brx(Assembler::zero, false, Assembler::pn, found_null);
1668       delayed()->nop();
1669 
1670       // Put all the "Case 3" tests here.
1671       record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
1672 
1673       // Found a null.  Keep searching for a matching receiver,
1674       // but remember that this is an empty (unused) slot.
1675       bind(found_null);
1676     }
1677   }
1678 
 1679   // In the fall-through case, we found no matching receiver, but we
 1680   // observed that receiver[start_row] is NULL.
1681 
1682   // Fill in the receiver field and increment the count.
1683   int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1684   set_mdp_data_at(recvr_offset, receiver);
1685   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1686   mov(DataLayout::counter_increment, scratch);
1687   set_mdp_data_at(count_offset, scratch);
1688   if (start_row > 0) {
1689     ba(done);
1690   }
1691 }
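
The unrolled recursion above implements this search (a sketch; recv and count stand for the receiver and receiver_count rows of the VirtualCallData):

    // Probe the type-profile rows for the receiver.
    int empty = -1;
    for (int row = 0; row < VirtualCallData::row_limit(); row++) {
      if (recv[row] == receiver) {                     // case 1: known receiver
        count[row] += DataLayout::counter_increment;
        return;
      }
      if (recv[row] == NULL && empty < 0) empty = row; // case 2: remember slot
    }
    if (empty >= 0) {                                  // claim the empty row
      recv[empty] = receiver;
      count[empty] = DataLayout::counter_increment;
    } else if (is_virtual_call) {
      total_count += DataLayout::counter_increment;    // polymorphic overflow
    }
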
1692 
1693 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1694                                                         Register scratch, bool is_virtual_call) {
1695   assert(ProfileInterpreter, "must be profiling");
1696   Label done;
1697 
1698   record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
1699 
1700   bind (done);
1701 }
1702 
1703 
1704 // Count a ret in the bytecodes.
1705 
1706 void InterpreterMacroAssembler::profile_ret(TosState state,
1707                                             Register return_bci,
1708                                             Register scratch) {
1709   if (ProfileInterpreter) {


1711     uint row;
1712 
1713     // If no method data exists, go to profile_continue.
1714     test_method_data_pointer(profile_continue);
1715 
1716     // Update the total ret count.
1717     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1718 
1719     for (row = 0; row < RetData::row_limit(); row++) {
1720       Label next_test;
1721 
1722       // See if return_bci is equal to bci[n]:
1723       test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
1724                        return_bci, next_test, scratch);
1725 
1726       // return_bci is equal to bci[n].  Increment the count.
1727       increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);
1728 
1729       // The method data pointer needs to be updated to reflect the new target.
1730       update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
1731       ba(profile_continue);
1732       bind(next_test);
1733     }
1734 
1735     update_mdp_for_ret(state, return_bci);
1736 
1737     bind (profile_continue);
1738   }
1739 }
1740 
1741 // Profile an unexpected null in the bytecodes.
1742 void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
1743   if (ProfileInterpreter) {
1744     Label profile_continue;
1745 
1746     // If no method data exists, go to profile_continue.
1747     test_method_data_pointer(profile_continue);
1748 
1749     set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);
1750 
1751     // The method data pointer needs to be updated.


1860                                                       Register Rtemp,
1861                                                       Register Rtemp2 ) {
1862 
1863   Register Rlimit = Lmonitors;
1864   const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1865   assert( (delta & LongAlignmentMask) == 0,
1866           "sizeof BasicObjectLock must be even number of doublewords");
1867 
1868   sub( SP,        delta, SP);
1869   sub( Lesp,      delta, Lesp);
1870   sub( Lmonitors, delta, Lmonitors);
1871 
1872   if (!stack_is_empty) {
1873 
1874     // must copy stack contents down
1875 
1876     Label start_copying, next;
1877 
1878     // untested("monitor stack expansion");
1879     compute_stack_base(Rtemp);
1880     ba(start_copying, false);
1881     delayed()->cmp(Rtemp, Rlimit); // done? duplicated below
1882 
1883     // note: must copy from low memory upwards
1884     // On entry to loop,
1885     // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
1886     // Loop mutates Rtemp
1887 
1888     bind( next);
1889 
1890     st_ptr(Rtemp2, Rtemp, 0);
1891     inc(Rtemp, wordSize);
1892     cmp(Rtemp, Rlimit); // are we done? (duplicated above)
1893 
1894     bind( start_copying );
1895 
1896     brx( notEqual, true, pn, next );
1897     delayed()->ld_ptr( Rtemp, delta, Rtemp2 );
1898 
1899     // done copying stack
1900   }
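
The copy loop above, restated (sketch): each word of the expression stack moves down by delta bytes, walking from low addresses upward so a word is read from its old slot before that slot is overwritten:

    // Runs after SP, Lesp and Lmonitors have all been lowered by delta.
    for (intptr_t* dst = new_stack_base; dst != (intptr_t*)Lmonitors; dst++) {
      *dst = *(intptr_t*)((char*)dst + delta);   // the old word sits delta above
    }
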


1948   assert_not_delayed();
1949   sll(index, Interpreter::logStackElementSize, index);
1950   sub(Llocals, index, index);
1951   load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
1952 }
1953 
1954 
1955 #ifdef ASSERT
1956 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
1957   Label L;
1958 
1959   assert(Rindex != Rscratch, "Registers cannot be same");
1960   assert(Rindex != Rscratch1, "Registers cannot be same");
1961   assert(Rlimit != Rscratch, "Registers cannot be same");
1962   assert(Rlimit != Rscratch1, "Registers cannot be same");
1963   assert(Rscratch1 != Rscratch, "Registers cannot be same");
1964 
1965   // untested("reg area corruption");
1966   add(Rindex, offset, Rscratch);
1967   add(Rlimit, 64 + STACK_BIAS, Rscratch1);
1968   cmp_and_brx(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, false, pn, L);
1969   stop("regsave area is being clobbered");
1970   bind(L);
1971 }
1972 #endif // ASSERT
1973 
1974 
1975 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
1976   assert_not_delayed();
1977   sll(index, Interpreter::logStackElementSize, index);
1978   sub(Llocals, index, index);
1979   debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
1980   st(src, index, 0);
1981 }
1982 
1983 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
1984   assert_not_delayed();
1985   sll(index, Interpreter::logStackElementSize, index);
1986   sub(Llocals, index, index);
1987 #ifdef ASSERT
1988   check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);


2110   // and store the result to memory
2111   st( Rtmp, be_counter );
2112 
2113   // Add backedge + invocation counter
2114   add( Rtmp, Rtmp2, Rtmp );
2115 
2116   // Note that this macro must leave backedge_count + invocation_count in Rtmp!
2117 }
2118 
2119 #ifndef CC_INTERP
2120 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
2121                                                              Register branch_bcp,
2122                                                              Register Rtmp ) {
2123   Label did_not_overflow;
2124   Label overflow_with_error;
2125   assert_different_registers(backedge_count, Rtmp, branch_bcp);
2126   assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
2127 
2128   AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
2129   load_contents(limit, Rtmp);
2130   cmp_and_br(backedge_count, Rtmp, Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow);
2131 
2132   // When ProfileInterpreter is on, the backedge_count comes from the
 2133   // methodDataOop, whose value does not get reset on the call to
2134   // frequency_counter_overflow().  To avoid excessive calls to the overflow
2135   // routine while the method is being compiled, add a second test to make sure
2136   // the overflow function is called only once every overflow_frequency.
2137   if (ProfileInterpreter) {
2138     const int overflow_frequency = 1024;
2139     andcc(backedge_count, overflow_frequency-1, Rtmp);
2140     brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
2141     delayed()->nop();
2142   }
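  // (Effect of the andcc test above: overflow_frequency == 1024 keeps only the
  //  low 10 bits of backedge_count, so the VM call below runs at most once per
  //  1024 counter overflows while the method is being compiled.)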
2143 
2144   // overflow in loop, pass branch bytecode
2145   set(6,Rtmp);
2146   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
2147 
2148   // Was an OSR adapter generated?
2149   // O0 = osr nmethod
2150   br_null(O0, false, Assembler::pn, overflow_with_error);
2151 
2152   // Has the nmethod been invalidated already?
2153   ld(O0, nmethod::entry_bci_offset(), O2);
2154   cmp_and_br(O2, InvalidOSREntryBci, Assembler::equal, false, Assembler::pn, overflow_with_error);
2155 
2156   // migrate the interpreter frame off of the stack
2157 
2158   mov(G2_thread, L7);
2159   // save nmethod
2160   mov(O0, L6);
2161   set_last_Java_frame(SP, noreg);
2162   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
2163   reset_last_Java_frame();
2164   mov(L7, G2_thread);
2165 
2166   // move OSR nmethod to I1
2167   mov(L6, I1);
2168 
2169   // OSR buffer to I0
2170   mov(O0, I0);
2171 
2172   // remove the interpreter frame
2173   restore(I5_savedSP, 0, SP);
2174 


2200   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2201   if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())    return true;
2202   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2203   if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())    return true;
2204 #endif // PRODUCT
2205   return false;
2206 }
2207 
2208 
2209 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2210   if (!VerifyOops)  return;
2211   // the VM documentation for the astore[_wide] bytecode allows
2212   // the TOS to be not only an oop but also a return address
2213   Label test;
2214   Label skip;
2215   // See if it is an address (in the current method):
2216 
2217   mov(reg, Rtmp);
2218   const int log2_bytecode_size_limit = 16;
2219   srl(Rtmp, log2_bytecode_size_limit, Rtmp);
2220   br_notnull( Rtmp, false, pt, test );
2221 
2222   // %%% should use call_VM_leaf here?
2223   save_frame_and_mov(0, Lmethod, O0, reg, O1);
2224   save_thread(L7_thread_cache);
2225   call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
2226   delayed()->nop();
2227   restore_thread(L7_thread_cache);
2228   br_notnull( O0, false, pt, skip, false );
2229   delayed()->restore();
2230 
2231   // Perform a more elaborate out-of-line call
2232   // Not an address; verify it:
2233   bind(test);
2234   verify_oop(reg);
2235   bind(skip);
2236 }
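
Restated (a sketch; verify_return_address is the VM-side helper called out of line above):

    // A value with no bits above 2^16 may be a return address (a bci);
    // let the VM check it, and only verify as an oop if that fails.
    if (((uintptr_t)value >> log2_bytecode_size_limit) == 0 &&
        verify_return_address(method, value)) {
      return;          // accepted as a return address, skip the oop check
    }
    verify_oop(value); // otherwise it must be a valid oop
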
2237 
2238 
2239 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2240   if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);


2249 // if (DTraceMethodProbes) {
2250 //   SharedRuntime::dtrace_method_entry(method, receiver);
2251 // }
2252 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2253 //   SharedRuntime::rc_trace_method_entry(method, receiver);
2254 // }
2255 
2256 void InterpreterMacroAssembler::notify_method_entry() {
2257 
2258   // C++ interpreter only uses this for native methods.
2259 
2260   // Whenever JVMTI puts a thread in interp_only_mode, method
2261   // entry/exit events are sent for that thread to track stack
2262   // depth.  If it is possible to enter interp_only_mode we add
2263   // the code to check if the event should be sent.
2264   if (JvmtiExport::can_post_interpreter_events()) {
2265     Label L;
2266     Register temp_reg = O5;
2267     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2268     ld(interp_only, temp_reg);
2269     br_zero(temp_reg, L);
2270     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2271     bind(L);
2272   }
2273 
2274   {
2275     Register temp_reg = O5;
2276     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2277     call_VM_leaf(noreg,
2278       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2279       G2_thread, Lmethod);
2280   }
2281 
2282   // RedefineClasses() tracing support for obsolete method entry
2283   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2284     call_VM_leaf(noreg,
2285       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2286       G2_thread, Lmethod);
2287   }
2288 }
2289 


2299 //   SharedRuntime::dtrace_method_exit(thread, method);
2300 // }
2301 //
2302 // Native methods have their result stored in d_tmp and l_tmp
2303 // Java methods have their result stored in the expression stack
2304 
2305 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2306                                                    TosState state,
2307                                                    NotifyMethodExitMode mode) {
2308   // C++ interpreter only uses this for native methods.
2309 
2310   // Whenever JVMTI puts a thread in interp_only_mode, method
2311   // entry/exit events are sent for that thread to track stack
2312   // depth.  If it is possible to enter interp_only_mode we add
2313   // the code to check if the event should be sent.
2314   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2315     Label L;
2316     Register temp_reg = O5;
2317     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2318     ld(interp_only, temp_reg);
2319     br_zero(temp_reg, L);
2320 
2321     // Note: frame::interpreter_frame_result has a dependency on how the
2322     // method result is saved across the call to post_method_exit. For
2323     // native methods it assumes the result registers are saved to
2324     // l_scratch and d_scratch. If this changes then the interpreter_frame_result
2325     // implementation will need to be updated too.
2326 
2327     save_return_value(state, is_native_method);
2328     call_VM(noreg,
2329             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
2330     restore_return_value(state, is_native_method);
2331     bind(L);
2332   }
2333 
2334   {
2335     Register temp_reg = O5;
2336     // Dtrace notification
2337     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2338     save_return_value(state, is_native_method);
2339     call_VM_leaf(




 219   case stos:                                           // fall through
 220   case itos: ld(val_addr, Otos_l1);                       break;
 221   case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
 222   case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
 223   case vtos: /* nothing to do */                          break;
 224   default  : ShouldNotReachHere();
 225   }
 226   // Clean up tos value in the jvmti thread state
 227   or3(G0, ilgl, G3_scratch);
 228   stw(G3_scratch, tos_addr);
 229   st_long(G0, val_addr);
 230   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 231 }
 232 
 233 
 234 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
 235   if (JvmtiExport::can_force_early_return()) {
 236     Label L;
 237     Register thr_state = G3_scratch;
 238     ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
 239     br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
 240 
 241     // Initiate earlyret handling only if it is not already being processed.
 242     // If the flag has the earlyret_processing bit set, it means that this code
 243     // is called *during* earlyret handling - we don't want to reenter.
 244     ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
 245     cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);
 246 
 247     // Call Interpreter::remove_activation_early_entry() to get the address of the
 248     // same-named entrypoint in the generated interpreter code
 249     ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
 250     call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
 251 
 252     // Jump to Interpreter::_remove_activation_early_entry
 253     jmpl(O0, G0, G0);
 254     delayed()->nop();
 255     bind(L);
 256   }
 257 }
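
This is the heart of the change on this page: explicit br_null/cmp_and_br sequences carrying a delay-slot bool become br_null_short/cmp_and_br_short calls that own the delay slot themselves and may emit a single compare-and-branch instruction when the target is near. A sketch of the shape such a macro could take (the real definition lives in the shared SPARC assembler sources; use_cbcond and the cbcond operands here are assumptions):

    // Sketch only, not the actual MacroAssembler definition.
    void MacroAssembler::cmp_and_br_short(Register s1, Register s2,
                                          Condition c, Predict p, Label& L) {
      assert_not_delayed();
      if (use_cbcond(L)) {                      // near target, cbcond available
        Assembler::cbcond(c, icc, s1, s2, L);   // one instruction, no delay slot
        return;
      }
      cmp(s1, s2);                              // classic three-instruction form
      br(c, false, p, L);
      delayed()->nop();
    }
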
 258 
 259 
 260 void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
 261   mov(arg_1, O0);
 262   mov(arg_2, O1);
 263   MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
 264 }
 265 #endif /* CC_INTERP */


 545 
 546 
 547 #ifdef ASSERT
 548 void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
 549   Label Bad, OK;
 550 
 551   // Saved SP must be aligned.
 552 #ifdef _LP64
 553   btst(2*BytesPerWord-1, Rsp);
 554 #else
 555   btst(LongAlignmentMask, Rsp);
 556 #endif
 557   br(Assembler::notZero, false, Assembler::pn, Bad);
 558   delayed()->nop();
 559 
 560   // Saved SP, plus register window size, must not be above FP.
 561   add(Rsp, frame::register_save_words * wordSize, Rtemp);
 562 #ifdef _LP64
 563   sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
 564 #endif
 565   cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);
 566 
 567   // Saved SP must not be ridiculously below current SP.
 568   size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
 569   set(maxstack, Rtemp);
 570   sub(SP, Rtemp, Rtemp);
 571 #ifdef _LP64
 572   add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
 573 #endif
 574   cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);
 575 
 576   ba_short(OK);
 577 
 578   bind(Bad);
 579   stop("on return to interpreted call, restored SP is corrupted");
 580 
 581   bind(OK);
 582 }
 583 
 584 
 585 void InterpreterMacroAssembler::verify_esp(Register Resp) {
 586   // about to read or write Resp[0]
 587   // make sure it is not in the monitors or the register save area
 588   Label OK1, OK2;
 589 
 590   cmp(Resp, Lmonitors);
 591   brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
 592   delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
 593   stop("too many pops:  Lesp points into monitor area");
 594   bind(OK1);
 595 #ifdef _LP64
 596   sub(Resp, STACK_BIAS, Resp);


 604 #endif // ASSERT
 605 
 606 // Load compiled (i2c) or interpreter entry when calling from interpreted and
 607 // do the call. Centralized so that all interpreter calls will do the same actions.
 608 // If jvmti single stepping is on for a thread we must not call compiled code.
 609 void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {
 610 
 611   // Assume we want to go compiled if available
 612 
 613   ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
 614 
 615   if (JvmtiExport::can_post_interpreter_events()) {
 616     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 617     // compiled code in threads for which the event is enabled.  Check here for
 618     // interp_only_mode if these events CAN be enabled.
 619     verify_thread();
 620     Label skip_compiled_code;
 621 
 622     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
 623     ld(interp_only, scratch);
 624     cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
 625     delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
 626     bind(skip_compiled_code);
 627   }
 628 
 629   // the i2c_adapters need methodOop in G5_method (right? %%%)
 630   // do the call
 631 #ifdef ASSERT
 632   {
 633     Label ok;
 634     br_notnull_short(target, Assembler::pt, ok);
 635     stop("null entry point");
 636     bind(ok);
 637   }
 638 #endif // ASSERT
 639 
 640   // Adjust Rret first so Llast_SP can be same as Rret
 641   add(Rret, -frame::pc_return_offset, O7);
 642   add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
 643   // Record SP so we can remove any stack space allocated by adapter transition
 644   jmp(target, 0);
 645   delayed()->mov(SP, Llast_SP);
 646 }
 647 
 648 void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
 649   assert_not_delayed();
 650 
 651   Label not_taken;
 652   if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
 653   else             br (cc, false, Assembler::pn, not_taken);
 654   delayed()->nop();


 954                                                               bool install_monitor_exception) {
 955   Label unlocked, unlock, no_unlock;
 956 
 957   // get the value of _do_not_unlock_if_synchronized into G1_scratch
 958   const Address do_not_unlock_if_synchronized(G2_thread,
 959     JavaThread::do_not_unlock_if_synchronized_offset());
 960   ldbool(do_not_unlock_if_synchronized, G1_scratch);
 961   stbool(G0, do_not_unlock_if_synchronized); // reset the flag
 962 
 963   // check if synchronized method
 964   const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
 965   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 966   push(state); // save tos
 967   ld(access_flags, G3_scratch); // Load access flags.
 968   btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
 969   br(zero, false, pt, unlocked);
 970   delayed()->nop();
 971 
 972   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
 973   // is set.
 974   cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
 975   delayed()->nop();
 976 
 977   // BasicObjectLock will be first in list, since this is a synchronized method. However, need
 978   // to check that the object has not been unlocked by an explicit monitorexit bytecode.
 979 
 980   //Intel: if (throw_monitor_exception) ... else ...
 981   // Entry already unlocked, need to throw exception
 982   //...
 983 
 984   // pass top-most monitor elem
 985   add( top_most_monitor(), O1 );
 986 
 987   ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
 988   br_notnull_short(G3_scratch, pt, unlock);
 989 
 990   if (throw_monitor_exception) {
 991     // Entry already unlocked need to throw an exception
 992     MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
 993     should_not_reach_here();
 994   } else {
 995     // Monitor already unlocked during a stack unroll.
 996     // If requested, install an illegal_monitor_state_exception.
 997     // Continue with stack unrolling.
 998     if (install_monitor_exception) {
 999       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1000     }
1001     ba_short(unlocked);
1002   }
1003 
1004   bind(unlock);
1005 
1006   unlock_object(O1);
1007 
1008   bind(unlocked);
1009 
1010   // I0, I1: Might contain return value
1011 
1012   // Check that all monitors are unlocked
1013   { Label loop, exception, entry, restart;
1014 
1015     Register Rmptr   = O0;
1016     Register Rtemp   = O1;
1017     Register Rlimit  = Lmonitors;
1018     const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1019     assert( (delta & LongAlignmentMask) == 0,
1020             "sizeof BasicObjectLock must be even number of doublewords");
1021 
1022     #ifdef ASSERT
1023     add(top_most_monitor(), Rmptr, delta);
1024     { Label L;
1025       // ensure that Rmptr starts out above (or at) Rlimit
1026       cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
1027       stop("monitor stack has negative size");
1028       bind(L);
1029     }
1030     #endif
1031     bind(restart);
1032     ba(entry);
1033     delayed()->
1034     add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry
1035 
1036     // Entry is still locked, need to throw exception
1037     bind(exception);
1038     if (throw_monitor_exception) {
1039       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1040       should_not_reach_here();
1041     } else {
1042       // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
1043       // Unlock does not block, so don't have to worry about the frame
1044       unlock_object(Rmptr);
1045       if (install_monitor_exception) {
1046         MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1047       }
1048       ba_short(restart);
1049     }
1050 
1051     bind(loop);
1052     cmp(Rtemp, G0);                             // check if current entry is used
1053     brx(Assembler::notEqual, false, pn, exception);
1054     delayed()->
1055     dec(Rmptr, delta);                          // otherwise advance to next entry
1056     #ifdef ASSERT
1057     { Label L;
1058       // ensure that Rmptr has not somehow stepped below Rlimit
1059       cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
1060       stop("ran off the end of the monitor stack");
1061       bind(L);
1062     }
1063     #endif
1064     bind(entry);
1065     cmp(Rmptr, Rlimit);                         // check if bottom reached
1066     brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
1067     delayed()->
1068     ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
1069   }
1070 
1071   bind(no_unlock);
1072   pop(state);
1073   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
1074 }
1075 
1076 
1077 // remove activation
1078 //
1079 // Unlock the receiver if this is a synchronized method.


1160     ld_ptr(mark_addr, mark_reg);
1161 
1162     if (UseBiasedLocking) {
1163       biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
1164     }
1165 
1166     // get the address of basicLock on stack that will be stored in the object
1167     // we need a temporary register here as we do not want to clobber lock_reg
1168     // (cas clobbers the destination register)
1169     mov(lock_reg, temp_reg);
1170     // set mark reg to be (markOop of object | UNLOCK_VALUE)
1171     or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
1172     // initialize the box  (Must happen before we update the object mark!)
1173     st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
1174     // compare and exchange object_addr, markOop | 1, stack address of basicLock
1175     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
1176     casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
1177       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
1178 
1179     // if the compare and exchange succeeded we are done (we saw an unlocked object)
1180     cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
1181 
1182     // We did not see an unlocked object so try the fast recursive case
1183 
1184     // Check if owner is self by comparing the value in the markOop of object
1185     // with the stack pointer
1186     sub(temp_reg, SP, temp_reg);
1187 #ifdef _LP64
1188     sub(temp_reg, STACK_BIAS, temp_reg);
1189 #endif
1190     assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
1191 
1192     // Composite "andcc" test:
1193     // (a) %sp -vs- markword proximity check, and,
1194     // (b) verify mark word LSBs == 0 (Stack-locked).
1195     //
1196     // FFFFF003/FFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
1197     // Note that the page size used for %sp proximity testing is arbitrary and is
1198     // unrelated to the actual MMU page size.  We use a 'logical' page size of
1199     // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
1200     // field of the andcc instruction.
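    // (Worked out for 32 bits: lock_mask_in_place == 0x3 and -4096 == 0xFFFFF000,
    //  so the mask is 0xFFFFF003 == -4093, inside the simm13 range [-4096, 4095].)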


1286 
1287 // Set the method data pointer for the current bcp.
1288 
1289 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1290   assert(ProfileInterpreter, "must be profiling interpreter");
1291   Label zero_continue;
1292 
1293   // Test MDO to avoid the call if it is NULL.
1294   ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
1295   test_method_data_pointer(zero_continue);
1296   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
1297   add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
1298   add(ImethodDataPtr, O0, ImethodDataPtr);
1299   bind(zero_continue);
1300 }
1301 
1302 // Test ImethodDataPtr.  If it is null, continue at the specified label
1303 
1304 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
1305   assert(ProfileInterpreter, "must be profiling interpreter");
1306   br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
1307 }
1308 
1309 void InterpreterMacroAssembler::verify_method_data_pointer() {
1310   assert(ProfileInterpreter, "must be profiling interpreter");
1311 #ifdef ASSERT
1312   Label verify_continue;
1313   test_method_data_pointer(verify_continue);
1314 
1315   // If the mdp is valid, it will point to a DataLayout header which is
1316   // consistent with the bcp.  The converse is highly probable also.
1317   lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
1318   ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
1319   add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
1320   add(G3_scratch, O5, G3_scratch);
1321   cmp(Lbcp, G3_scratch);
1322   brx(Assembler::equal, false, Assembler::pt, verify_continue);
1323 
1324   Register temp_reg = O5;
1325   delayed()->mov(ImethodDataPtr, temp_reg);
1326   // %%% should use call_VM_leaf here?


1332   save_thread(L7_thread_cache);
1333   call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
1334   delayed()->nop();
1335   restore_thread(L7_thread_cache);
1336   ldf(FloatRegisterImpl::D, d_save, Ftos_d);
1337   restore();
1338   bind(verify_continue);
1339 #endif // ASSERT
1340 }
1341 
1342 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
1343                                                                 Register Rtmp,
1344                                                                 Label &profile_continue) {
1345   assert(ProfileInterpreter, "must be profiling interpreter");
1346   // Control will flow to "profile_continue" if the counter is less than the
1347   // limit or if we call profile_method()
1348 
1349   Label done;
1350 
1351   // if no method data exists, and the counter is high enough, make one
1352   br_notnull_short(ImethodDataPtr, Assembler::pn, done);
1353 
1354   // Test to see if we should create a method data oop
1355   AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
1356   sethi(profile_limit, Rtmp);
1357   ld(Rtmp, profile_limit.low10(), Rtmp);
1358   cmp_and_br_short(invocation_count, Rtmp, Assembler::lessUnsigned, Assembler::pn, profile_continue);
1359 
1360   // Build it now.
1361   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1362   set_method_data_pointer_for_bcp();
1363   ba_short(profile_continue);
1364   bind(done);
1365 }
1366 
1367 // Store a value at some constant offset from the method data pointer.
1368 
1369 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
1370   assert(ProfileInterpreter, "must be profiling interpreter");
1371   st_ptr(value, ImethodDataPtr, constant);
1372 }
1373 
1374 void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
1375                                                       Register bumped_count,
1376                                                       bool decrement) {
1377   assert(ProfileInterpreter, "must be profiling interpreter");
1378 
1379   // Load the counter.
1380   ld_ptr(counter, bumped_count);
1381 
1382   if (decrement) {
1383     // Decrement the register.  Set condition codes.


1575     bind (profile_continue);
1576   }
1577 }
1578 
1579 
1580 // Count a virtual call in the bytecodes.
1581 
1582 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1583                                                      Register scratch,
1584                                                      bool receiver_can_be_null) {
1585   if (ProfileInterpreter) {
1586     Label profile_continue;
1587 
1588     // If no method data exists, go to profile_continue.
1589     test_method_data_pointer(profile_continue);
1590 
1591 
1592     Label skip_receiver_profile;
1593     if (receiver_can_be_null) {
1594       Label not_null;
1595       br_notnull_short(receiver, Assembler::pt, not_null);
1596       // We are making a call.  Increment the count for null receiver.
1597       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1598       ba_short(skip_receiver_profile);
1599       bind(not_null);
1600     }
1601 
1602     // Record the receiver type.
1603     record_klass_in_profile(receiver, scratch, true);
1604     bind(skip_receiver_profile);
1605 
1606     // The method data pointer needs to be updated to reflect the new target.
1607     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1608     bind (profile_continue);
1609   }
1610 }
1611 
1612 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1613                                         Register receiver, Register scratch,
1614                                         int start_row, Label& done, bool is_virtual_call) {
1615   if (TypeProfileWidth == 0) {
1616     if (is_virtual_call) {
1617       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1618     }


1622   int last_row = VirtualCallData::row_limit() - 1;
1623   assert(start_row <= last_row, "must be work left to do");
1624   // Test this row for both the receiver and for null.
1625   // Take any of three different outcomes:
1626   //   1. found receiver => increment count and goto done
1627   //   2. found null => keep looking for case 1, maybe allocate this cell
1628   //   3. found something else => keep looking for cases 1 and 2
1629   // Case 3 is handled by a recursive call.
1630   for (int row = start_row; row <= last_row; row++) {
1631     Label next_test;
1632     bool test_for_null_also = (row == start_row);
1633 
1634     // See if the receiver is receiver[n].
1635     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1636     test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
1637     // delayed()->tst(scratch);
1638 
1639     // The receiver is receiver[n].  Increment count[n].
1640     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1641     increment_mdp_data_at(count_offset, scratch);
1642     ba_short(done);
1643     bind(next_test);
1644 
1645     if (test_for_null_also) {
1646       Label found_null;
1647       // Failed the equality check on receiver[n]...  Test for null.
1648       if (start_row == last_row) {
1649         // The only thing left to do is handle the null case.
1650         if (is_virtual_call) {
1651           brx(Assembler::zero, false, Assembler::pn, found_null);
1652           delayed()->nop();
1653           // Receiver did not match any saved receiver and there is no empty row for it.
1654           // Increment total counter to indicate polymorphic case.
1655           increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1656           ba_short(done);
1657           bind(found_null);
1658         } else {
1659           brx(Assembler::notZero, false, Assembler::pt, done);
1660           delayed()->nop();
1661         }
1662         break;
1663       }
1664       // Since null is rare, make it the branch-taken case.
1665       brx(Assembler::zero, false, Assembler::pn, found_null);
1666       delayed()->nop();
1667 
1668       // Put all the "Case 3" tests here.
1669       record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
1670 
1671       // Found a null.  Keep searching for a matching receiver,
1672       // but remember that this is an empty (unused) slot.
1673       bind(found_null);
1674     }
1675   }
1676 
1677   // In the fall-through case, we found no matching receiver, but we
1678   // observed that receiver[start_row] is NULL.
1679 
1680   // Fill in the receiver field and increment the count.
1681   int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1682   set_mdp_data_at(recvr_offset, receiver);
1683   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1684   mov(DataLayout::counter_increment, scratch);
1685   set_mdp_data_at(count_offset, scratch);
1686   if (start_row > 0) {
1687     ba_short(done);
1688   }
1689 }
1690 
1691 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1692                                                         Register scratch, bool is_virtual_call) {
1693   assert(ProfileInterpreter, "must be profiling");
1694   Label done;
1695 
1696   record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
1697 
1698   bind (done);
1699 }
1700 
1701 
1702 // Count a ret in the bytecodes.
1703 
1704 void InterpreterMacroAssembler::profile_ret(TosState state,
1705                                             Register return_bci,
1706                                             Register scratch) {
1707   if (ProfileInterpreter) {
1708     Label profile_continue;
1709     uint row;
1710 
1711     // If no method data exists, go to profile_continue.
1712     test_method_data_pointer(profile_continue);
1713 
1714     // Update the total ret count.
1715     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1716 
1717     for (row = 0; row < RetData::row_limit(); row++) {
1718       Label next_test;
1719 
1720       // See if return_bci is equal to bci[n]:
1721       test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
1722                        return_bci, next_test, scratch);
1723 
1724       // return_bci is equal to bci[n].  Increment the count.
1725       increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);
1726 
1727       // The method data pointer needs to be updated to reflect the new target.
1728       update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
1729       ba_short(profile_continue);
1730       bind(next_test);
1731     }
1732 
1733     update_mdp_for_ret(state, return_bci);
1734 
1735     bind (profile_continue);
1736   }
1737 }
1738 
1739 // Profile an unexpected null in the bytecodes.
1740 void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
1741   if (ProfileInterpreter) {
1742     Label profile_continue;
1743 
1744     // If no method data exists, go to profile_continue.
1745     test_method_data_pointer(profile_continue);
1746 
1747     set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);
1748 
1749     // The method data pointer needs to be updated.


1858                                                       Register Rtemp,
1859                                                       Register Rtemp2 ) {
1860 
1861   Register Rlimit = Lmonitors;
1862   const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1863   assert( (delta & LongAlignmentMask) == 0,
1864           "sizeof BasicObjectLock must be a whole number of doublewords");
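       // For example, assuming a two-word BasicObjectLock (displaced header
       // plus object), delta is 16 bytes in LP64 and 8 bytes in ILP32, both
       // of which satisfy the doubleword-alignment assert above.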
1865 
1866   sub( SP,        delta, SP);
1867   sub( Lesp,      delta, Lesp);
1868   sub( Lmonitors, delta, Lmonitors);
1869 
1870   if (!stack_is_empty) {
1871 
1872     // must copy stack contents down
1873 
1874     Label start_copying, next;
1875 
1876     // untested("monitor stack expansion");
1877     compute_stack_base(Rtemp);
1878     ba(start_copying);
1879     delayed()->cmp(Rtemp, Rlimit); // done? duplicated below
1880 
1881     // note: must copy from low memory upwards
1882     // On entry to loop,
1883     // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
1884     // Loop mutates Rtemp
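         // The rotated loop below behaves like this sketch (the annulled
         // delay slot performs each iteration's load before the store at
         // 'next'):
         //
         //   while (Rtemp != Rlimit) {
         //     *Rtemp = *(Rtemp + delta);   // slide an old stack word down by delta
         //     Rtemp += wordSize;
         //   }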
1885 
1886     bind( next);
1887 
1888     st_ptr(Rtemp2, Rtemp, 0);
1889     inc(Rtemp, wordSize);
1890     cmp(Rtemp, Rlimit); // are we done? (duplicated above)
1891 
1892     bind( start_copying );
1893 
1894     brx( notEqual, true, pn, next );
1895     delayed()->ld_ptr( Rtemp, delta, Rtemp2 );
1896 
1897     // done copying stack
1898   }


1946   assert_not_delayed();
1947   sll(index, Interpreter::logStackElementSize, index);
1948   sub(Llocals, index, index);
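       // index now holds Llocals - n*stackElementSize; locals sit at
       // decreasing addresses, and the double spans two local slots, so it
       // is loaded at the offset of the pair's second slot below.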
1949   load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
1950 }
1951 
1952 
1953 #ifdef ASSERT
1954 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
1955   Label L;
1956 
1957   assert(Rindex != Rscratch, "Registers cannot be same");
1958   assert(Rindex != Rscratch1, "Registers cannot be same");
1959   assert(Rlimit != Rscratch, "Registers cannot be same");
1960   assert(Rlimit != Rscratch1, "Registers cannot be same");
1961   assert(Rscratch1 != Rscratch, "Registers cannot be same");
1962 
1963   // untested("reg area corruption");
1964   add(Rindex, offset, Rscratch);
1965   add(Rlimit, 64 + STACK_BIAS, Rscratch1);
1966   cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
1967   stop("regsave area is being clobbered");
1968   bind(L);
1969 }
1970 #endif // ASSERT
1971 
1972 
1973 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
1974   assert_not_delayed();
1975   sll(index, Interpreter::logStackElementSize, index);
1976   sub(Llocals, index, index);
1977   debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
1978   st(src, index, 0);
1979 }
1980 
1981 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
1982   assert_not_delayed();
1983   sll(index, Interpreter::logStackElementSize, index);
1984   sub(Llocals, index, index);
1985 #ifdef ASSERT
1986   check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);


2108   // and store the result to memory
2109   st( Rtmp, be_counter );
2110 
2111   // Add backedge + invocation counter
2112   add( Rtmp, Rtmp2, Rtmp );
2113 
2114   // Note that this macro must leave backedge_count + invocation_count in Rtmp!
2115 }
2116 
2117 #ifndef CC_INTERP
2118 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
2119                                                              Register branch_bcp,
2120                                                              Register Rtmp ) {
2121   Label did_not_overflow;
2122   Label overflow_with_error;
2123   assert_different_registers(backedge_count, Rtmp, branch_bcp);
2124   assert(UseOnStackReplacement, "must have UseOnStackReplacement to call test_backedge_count_for_osr");
2125 
2126   AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
2127   load_contents(limit, Rtmp);
2128   cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
2129 
2130   // When ProfileInterpreter is on, the backedge_count comes from the
2131   // methodDataOop, whose value does not get reset on the call to
2132   // frequency_counter_overflow().  To avoid excessive calls to the overflow
2133   // routine while the method is being compiled, add a second test to make sure
2134   // the overflow function is called only once every overflow_frequency.
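       // Concretely, with overflow_frequency == 1024 the overflow call below
       // is only reached when (backedge_count & 1023) == 0, i.e. at most once
       // per 1024 backedges while compilation is in progress.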
2135   if (ProfileInterpreter) {
2136     const int overflow_frequency = 1024;
2137     andcc(backedge_count, overflow_frequency-1, Rtmp);
2138     brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
2139     delayed()->nop();
2140   }
2141 
2142   // overflow in loop, pass branch bytecode
2143   set(6, Rtmp);
2144   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
2145 
2146   // Was an OSR adapter generated?
2147   // O0 = osr nmethod
2148   br_null_short(O0, Assembler::pn, overflow_with_error);
2149 
2150   // Has the nmethod been invalidated already?
2151   ld(O0, nmethod::entry_bci_offset(), O2);
2152   cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);
2153 
2154   // migrate the interpreter frame off of the stack
2155 
2156   mov(G2_thread, L7);
2157   // save nmethod
2158   mov(O0, L6);
2159   set_last_Java_frame(SP, noreg);
2160   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
2161   reset_last_Java_frame();
2162   mov(L7, G2_thread);
2163 
2164   // move OSR nmethod to I1
2165   mov(L6, I1);
2166 
2167   // OSR buffer to I0
2168   mov(O0, I0);
2169 
2170   // remove the interpreter frame
2171   restore(I5_savedSP, 0, SP);
2172 


2198   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2199   if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())    return true;
2200   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2201   if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())    return true;
2202 #endif // PRODUCT
2203   return false;
2204 }
2205 
2206 
2207 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2208   if (!VerifyOops)  return;
2209   // the VM documentation for the astore[_wide] bytecode allows
2210   // the TOS to be not only an oop but also a return address
2211   Label test;
2212   Label skip;
2213   // See if it is an address (in the current method):
2214 
2215   mov(reg, Rtmp);
2216   const int log2_bytecode_size_limit = 16;
2217   srl(Rtmp, log2_bytecode_size_limit, Rtmp);
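       // If reg >> 16 is zero, the value is small enough to be a bytecode
       // index and is vetted as a return address below; anything larger is
       // checked as an oop at 'test'.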
2218   br_notnull_short( Rtmp, pt, test );
2219 
2220   // %%% should use call_VM_leaf here?
2221   save_frame_and_mov(0, Lmethod, O0, reg, O1);
2222   save_thread(L7_thread_cache);
2223   call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
2224   delayed()->nop();
2225   restore_thread(L7_thread_cache);
2226   br_notnull( O0, false, pt, skip, false );
2227   delayed()->restore();
2228 
2229   // Perform a more elaborate out-of-line call
2230   // Not an address; verify it:
2231   bind(test);
2232   verify_oop(reg);
2233   bind(skip);
2234 }
2235 
2236 
2237 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2238   if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);


2247 // if (DTraceMethodProbes) {
2248 //   SharedRuntime::dtrace_method_entry(method, receiver);
2249 // }
2250 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2251 //   SharedRuntime::rc_trace_method_entry(method, receiver);
2252 // }
2253 
2254 void InterpreterMacroAssembler::notify_method_entry() {
2255 
2256   // C++ interpreter only uses this for native methods.
2257 
2258   // Whenever JVMTI puts a thread in interp_only_mode, method
2259   // entry/exit events are sent for that thread to track stack
2260   // depth.  If it is possible to enter interp_only_mode we add
2261   // the code to check if the event should be sent.
2262   if (JvmtiExport::can_post_interpreter_events()) {
2263     Label L;
2264     Register temp_reg = O5;
2265     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2266     ld(interp_only, temp_reg);
2267     cmp_and_br_short(temp_reg, 0, equal, pt, L);
2268     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2269     bind(L);
2270   }
2271 
2272   {
2273     Register temp_reg = O5;
2274     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2275     call_VM_leaf(noreg,
2276       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2277       G2_thread, Lmethod);
2278   }
2279 
2280   // RedefineClasses() tracing support for obsolete method entry
2281   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2282     call_VM_leaf(noreg,
2283       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2284       G2_thread, Lmethod);
2285   }
2286 }
2287 


2297 //   SharedRuntime::dtrace_method_exit(thread, method);
2298 // }
2299 //
2300 // Native methods have their result stored in d_tmp and l_tmp
2301 // Java methods have their result stored in the expression stack
2302 
2303 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2304                                                    TosState state,
2305                                                    NotifyMethodExitMode mode) {
2306   // C++ interpreter only uses this for native methods.
2307 
2308   // Whenever JVMTI puts a thread in interp_only_mode, method
2309   // entry/exit events are sent for that thread to track stack
2310   // depth.  If it is possible to enter interp_only_mode we add
2311   // the code to check if the event should be sent.
2312   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2313     Label L;
2314     Register temp_reg = O5;
2315     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2316     ld(interp_only, temp_reg);
2317     cmp_and_br_short(temp_reg, 0, equal, pt, L);
2318 
2319     // Note: frame::interpreter_frame_result has a dependency on how the
2320     // method result is saved across the call to post_method_exit. For
2321     // native methods it assumes the result registers are saved to
2322     // l_scratch and d_scratch. If this changes then the interpreter_frame_result
2323     // implementation will need to be updated too.
2324 
2325     save_return_value(state, is_native_method);
2326     call_VM(noreg,
2327             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
2328     restore_return_value(state, is_native_method);
2329     bind(L);
2330   }
2331 
2332   {
2333     Register temp_reg = O5;
2334     // Dtrace notification
2335     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2336     save_return_value(state, is_native_method);
2337     call_VM_leaf(

