src/cpu/sparc/vm/interp_masm_sparc.cpp
 219   case stos:                                           // fall through
 220   case itos: ld(val_addr, Otos_l1);                       break;
 221   case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
 222   case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
 223   case vtos: /* nothing to do */                          break;
 224   default  : ShouldNotReachHere();
 225   }
 226   // Clean up tos value in the jvmti thread state
 227   or3(G0, ilgl, G3_scratch);
 228   stw(G3_scratch, tos_addr);
 229   st_long(G0, val_addr);
 230   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 231 }
 232 
 233 
 234 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
 235   if (JvmtiExport::can_force_early_return()) {
 236     Label L;
 237     Register thr_state = G3_scratch;
 238     ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
 239     tst(thr_state);
 240     br(zero, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
 241     delayed()->nop();
 242 
 243     // Initiate earlyret handling only if it is not already being processed.
 244     // If the flag has the earlyret_processing bit set, it means that this code
 245     // is called *during* earlyret handling - we don't want to reenter.
 246     ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
 247     cmp(G4_scratch, JvmtiThreadState::earlyret_pending);
 248     br(Assembler::notEqual, false, pt, L);
 249     delayed()->nop();
 250 
 251     // Call Interpreter::remove_activation_early_entry() to get the address of the
 252     // same-named entrypoint in the generated interpreter code
 253     ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
 254     call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
 255 
 256     // Jump to Interpreter::_remove_activation_early_entry
 257     jmpl(O0, G0, G0);
 258     delayed()->nop();
 259     bind(L);
 260   }
 261 }
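
As a reading aid: the branches above implement a two-level gate. A minimal self-contained C++ model of the decision (function and parameter names are illustrative stand-ins, not VM API):

    // Hypothetical model of the earlyret gate emitted above, not VM source.
    // Handling proceeds only when a JVMTI thread state exists and its earlyret
    // state is exactly 'pending'; any other value means we are already inside
    // earlyret processing and must not reenter.
    bool should_handle_earlyret(const void* jvmti_thread_state,
                                int earlyret_state, int earlyret_pending) {
      if (jvmti_thread_state == 0) return false;   // br on null thr_state exits
      return earlyret_state == earlyret_pending;   // notEqual branch skips handling
    }
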
 262 
 263 
 264 void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
 265   mov(arg_1, O0);
 266   mov(arg_2, O1);
 267   MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
 268 }
 269 #endif /* CC_INTERP */


 549 
 550 
 551 #ifdef ASSERT
 552 void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
 553   Label Bad, OK;
 554 
 555   // Saved SP must be aligned.
 556 #ifdef _LP64
 557   btst(2*BytesPerWord-1, Rsp);
 558 #else
 559   btst(LongAlignmentMask, Rsp);
 560 #endif
 561   br(Assembler::notZero, false, Assembler::pn, Bad);
 562   delayed()->nop();
 563 
 564   // Saved SP, plus register window size, must not be above FP.
 565   add(Rsp, frame::register_save_words * wordSize, Rtemp);
 566 #ifdef _LP64
 567   sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
 568 #endif
 569   cmp(Rtemp, FP);
 570   brx(Assembler::greaterUnsigned, false, Assembler::pn, Bad);
 571   delayed()->nop();
 572 
 573   // Saved SP must not be ridiculously below current SP.
 574   size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
 575   set(maxstack, Rtemp);
 576   sub(SP, Rtemp, Rtemp);
 577 #ifdef _LP64
 578   add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
 579 #endif
 580   cmp(Rsp, Rtemp);
 581   brx(Assembler::lessUnsigned, false, Assembler::pn, Bad);
 582   delayed()->nop();
 583 
 584   br(Assembler::always, false, Assembler::pn, OK);
 585   delayed()->nop();
 586 
 587   bind(Bad);
 588   stop("on return to interpreted call, restored SP is corrupted");
 589 
 590   bind(OK);
 591 }
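
The three checks reduce to a self-contained C++ sketch; the constants (wordSize 8, register_save_words 16, STACK_BIAS 2047) are the usual 64-bit SPARC values and are assumptions here, not taken from this file:

    #include <cstddef>
    #include <cstdint>

    // Sketch of verify_sp's checks under assumed 64-bit SPARC constants.
    bool verify_sp_model(uintptr_t rsp, uintptr_t fp, uintptr_t sp, size_t maxstack) {
      const uintptr_t wordSize = 8, register_save_words = 16, STACK_BIAS = 2047;
      if (rsp & (2 * wordSize - 1)) return false;                             // not aligned
      if (rsp + register_save_words * wordSize - STACK_BIAS > fp) return false; // above FP
      if (rsp < sp - maxstack + STACK_BIAS) return false;                     // absurdly low
      return true;
    }
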
 592 
 593 
 594 void InterpreterMacroAssembler::verify_esp(Register Resp) {
 595   // about to read or write Resp[0]
 596   // make sure it is not in the monitors or the register save area
 597   Label OK1, OK2;
 598 
 599   cmp(Resp, Lmonitors);
 600   brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
 601   delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
 602   stop("too many pops:  Lesp points into monitor area");
 603   bind(OK1);
 604 #ifdef _LP64
 605   sub(Resp, STACK_BIAS, Resp);


 613 #endif // ASSERT
 614 
 615 // Load compiled (i2c) or interpreter entry when calling from interpreted and
 616 // do the call. Centralized so that all interpreter calls will do the same actions.
 617 // If jvmti single stepping is on for a thread we must not call compiled code.
 618 void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {
 619 
 620   // Assume we want to go compiled if available
 621 
 622   ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
 623 
 624   if (JvmtiExport::can_post_interpreter_events()) {
 625     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 626     // compiled code in threads for which the event is enabled.  Check here for
 627     // interp_only_mode if these events CAN be enabled.
 628     verify_thread();
 629     Label skip_compiled_code;
 630 
 631     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
 632     ld(interp_only, scratch);
 633     tst(scratch);
 634     br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
 635     delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
 636     bind(skip_compiled_code);
 637   }
 638 
 639   // the i2c_adapters need methodOop in G5_method (right? %%%)
 640   // do the call
 641 #ifdef ASSERT
 642   {
 643     Label ok;
 644     br_notnull(target, false, Assembler::pt, ok);
 645     delayed()->nop();
 646     stop("null entry point");
 647     bind(ok);
 648   }
 649 #endif // ASSERT
 650 
 651   // Adjust Rret first so Llast_SP can be same as Rret
 652   add(Rret, -frame::pc_return_offset, O7);
 653   add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
 654   // Record SP so we can remove any stack space allocated by adapter transition
 655   jmp(target, 0);
 656   delayed()->mov(SP, Llast_SP);
 657 }
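
In effect the emitted code picks the call target as in this self-contained sketch (the struct and field names are illustrative stand-ins for the methodOop offsets):

    // Sketch of the target selection in call_from_interpreter (illustrative types).
    struct MethodModel { void* from_interpreted_entry; void* interpreter_entry; };

    void* select_call_target(const MethodModel* m, bool can_post_events,
                             int interp_only_mode) {
      void* target = m->from_interpreted_entry;    // assume compiled if available
      if (can_post_events && interp_only_mode != 0)
        target = m->interpreter_entry;             // single-stepping: stay interpreted
      return target;
    }
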
 658 
 659 void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
 660   assert_not_delayed();
 661 
 662   Label not_taken;
 663   if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
 664   else             br (cc, false, Assembler::pn, not_taken);
 665   delayed()->nop();


 965                                                               bool install_monitor_exception) {
 966   Label unlocked, unlock, no_unlock;
 967 
 968   // get the value of _do_not_unlock_if_synchronized into G1_scratch
 969   const Address do_not_unlock_if_synchronized(G2_thread,
 970     JavaThread::do_not_unlock_if_synchronized_offset());
 971   ldbool(do_not_unlock_if_synchronized, G1_scratch);
 972   stbool(G0, do_not_unlock_if_synchronized); // reset the flag
 973 
 974   // check if synchronized method
 975   const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
 976   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 977   push(state); // save tos
 978   ld(access_flags, G3_scratch); // Load access flags.
 979   btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
 980   br(zero, false, pt, unlocked);
 981   delayed()->nop();
 982 
 983   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
 984   // is set.
 985   tstbool(G1_scratch);
 986   br(Assembler::notZero, false, pn, no_unlock);
 987   delayed()->nop();
 988 
 989   // BasicObjectLock will be first in list, since this is a synchronized method. However, need
 990   // to check that the object has not been unlocked by an explicit monitorexit bytecode.
 991 
 992   // Intel: if (throw_monitor_exception) ... else ...
 993   // Entry already unlocked, need to throw exception
 994   //...
 995 
 996   // pass top-most monitor elem
 997   add( top_most_monitor(), O1 );
 998 
 999   ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
1000   br_notnull(G3_scratch, false, pt, unlock);
1001   delayed()->nop();
1002 
1003   if (throw_monitor_exception) {
1004     // Entry already unlocked, need to throw an exception
1005     MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1006     should_not_reach_here();
1007   } else {
1008     // Monitor already unlocked during a stack unroll.
1009     // If requested, install an illegal_monitor_state_exception.
1010     // Continue with stack unrolling.
1011     if (install_monitor_exception) {
1012       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1013     }
1014     ba(false, unlocked);
1015     delayed()->nop();
1016   }
1017 
1018   bind(unlock);
1019 
1020   unlock_object(O1);
1021 
1022   bind(unlocked);
1023 
1024   // I0, I1: Might contain return value
1025 
1026   // Check that all monitors are unlocked
1027   { Label loop, exception, entry, restart;
1028 
1029     Register Rmptr   = O0;
1030     Register Rtemp   = O1;
1031     Register Rlimit  = Lmonitors;
1032     const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1033     assert( (delta & LongAlignmentMask) == 0,
1034             "sizeof BasicObjectLock must be even number of doublewords");
1035 
1036     #ifdef ASSERT
1037     add(top_most_monitor(), Rmptr, delta);
1038     { Label L;
1039       // ensure that Rmptr starts out above (or at) Rlimit
1040       cmp(Rmptr, Rlimit);
1041       brx(Assembler::greaterEqualUnsigned, false, pn, L);
1042       delayed()->nop();
1043       stop("monitor stack has negative size");
1044       bind(L);
1045     }
1046     #endif
1047     bind(restart);
1048     ba(false, entry);
1049     delayed()->
1050     add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry
1051 
1052     // Entry is still locked, need to throw exception
1053     bind(exception);
1054     if (throw_monitor_exception) {
1055       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1056       should_not_reach_here();
1057     } else {
1058       // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
1059       // Unlock does not block, so don't have to worry about the frame
1060       unlock_object(Rmptr);
1061       if (install_monitor_exception) {
1062         MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1063       }
1064       ba(false, restart);
1065       delayed()->nop();
1066     }
1067 
1068     bind(loop);
1069     cmp(Rtemp, G0);                             // check if current entry is used
1070     brx(Assembler::notEqual, false, pn, exception);
1071     delayed()->
1072     dec(Rmptr, delta);                          // otherwise advance to next entry
1073     #ifdef ASSERT
1074     { Label L;
1075       // ensure that Rmptr has not somehow stepped below Rlimit
1076       cmp(Rmptr, Rlimit);
1077       brx(Assembler::greaterEqualUnsigned, false, pn, L);
1078       delayed()->nop();
1079       stop("ran off the end of the monitor stack");
1080       bind(L);
1081     }
1082     #endif
1083     bind(entry);
1084     cmp(Rmptr, Rlimit);                         // check if bottom reached
1085     brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
1086     delayed()->
1087     ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
1088   }
1089 
1090   bind(no_unlock);
1091   pop(state);
1092   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
1093 }
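
The monitor-verification loop above walks entries from the bottom-most monitor down to Lmonitors, treating obj == NULL as "unlocked". A self-contained model (types and names illustrative):

    #include <cstddef>

    // Model of the all-monitors-unlocked scan; 'newest' plays the role of
    // Lmonitors (lowest address) and 'count' is the number of entries above it.
    struct MonitorModel { void* obj; };

    // Returns the first still-locked entry, or NULL if the monitor stack is
    // clean. On a hit the real code either throws an exception or force-unlocks
    // the entry and restarts the scan.
    MonitorModel* find_locked_monitor(MonitorModel* newest, long count) {
      for (long i = count - 1; i >= 0; i--) {      // bottom-most entry first
        if (newest[i].obj != NULL) return &newest[i];
      }
      return NULL;
    }
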
1094 
1095 
1096 // remove activation
1097 //
1098 // Unlock the receiver if this is a synchronized method.


1179     ld_ptr(mark_addr, mark_reg);
1180 
1181     if (UseBiasedLocking) {
1182       biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
1183     }
1184 
1185     // get the address of basicLock on stack that will be stored in the object
1186     // we need a temporary register here as we do not want to clobber lock_reg
1187     // (cas clobbers the destination register)
1188     mov(lock_reg, temp_reg);
1189     // set mark reg to be (markOop of object | UNLOCK_VALUE)
1190     or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
1191     // initialize the box  (Must happen before we update the object mark!)
1192     st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
1193     // compare and exchange object_addr, markOop | 1, stack address of basicLock
1194     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
1195     casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
1196       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
1197 
1198     // if the compare and exchange succeeded we are done (we saw an unlocked object)
1199     cmp(mark_reg, temp_reg);
1200     brx(Assembler::equal, true, Assembler::pt, done);
1201     delayed()->nop();
1202 
1203     // We did not see an unlocked object so try the fast recursive case
1204 
1205     // Check if owner is self by comparing the value in the markOop of object
1206     // with the stack pointer
1207     sub(temp_reg, SP, temp_reg);
1208 #ifdef _LP64
1209     sub(temp_reg, STACK_BIAS, temp_reg);
1210 #endif
1211     assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
1212 
1213     // Composite "andcc" test:
1214     // (a) %sp -vs- markword proximity check, and,
1215     // (b) verify mark word LSBs == 0 (Stack-locked).
1216     //
1217     // FFFFF003/FFFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
1218     // Note that the page size used for %sp proximity testing is arbitrary and is
1219     // unrelated to the actual MMU page size.  We use a 'logical' page size of
1220     // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
1221     // field of the andcc instruction.
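
The mask arithmetic is easy to check standalone; this program prints 0xfffffffffffff003 on a 64-bit host (0xfffff003 if redone with 32-bit types). Treating lock_mask_in_place as 0x3, the two low mark-word lock bits, is an assumption here:

    #include <cstdio>
    #include <cstdint>

    // Standalone check of the composite andcc mask: lock_mask_in_place OR'ed
    // with -page_size, using the 'logical' 4096-byte page described above.
    // The andcc result is zero exactly when the displaced-header delta is both
    // stack-locked (low bits 0) and within one page of %sp.
    int main() {
      const int64_t lock_mask_in_place = 0x3;
      const int64_t page_size = 4096;
      const uint64_t mask = (uint64_t)(lock_mask_in_place | -page_size);
      printf("%#llx\n", (unsigned long long) mask);  // prints 0xfffffffffffff003
      return 0;
    }
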


1307 
1308 // Set the method data pointer for the current bcp.
1309 
1310 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1311   assert(ProfileInterpreter, "must be profiling interpreter");
1312   Label zero_continue;
1313 
1314   // Test MDO to avoid the call if it is NULL.
1315   ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
1316   test_method_data_pointer(zero_continue);
1317   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
1318   add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
1319   add(ImethodDataPtr, O0, ImethodDataPtr);
1320   bind(zero_continue);
1321 }
1322 
1323 // Test ImethodDataPtr.  If it is null, continue at the specified label
1324 
1325 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
1326   assert(ProfileInterpreter, "must be profiling interpreter");
1327 #ifdef _LP64
1328   bpr(Assembler::rc_z, false, Assembler::pn, ImethodDataPtr, zero_continue);
1329 #else
1330   tst(ImethodDataPtr);
1331   br(Assembler::zero, false, Assembler::pn, zero_continue);
1332 #endif
1333   delayed()->nop();
1334 }
1335 
1336 void InterpreterMacroAssembler::verify_method_data_pointer() {
1337   assert(ProfileInterpreter, "must be profiling interpreter");
1338 #ifdef ASSERT
1339   Label verify_continue;
1340   test_method_data_pointer(verify_continue);
1341 
1342   // If the mdp is valid, it will point to a DataLayout header which is
1343   // consistent with the bcp.  The converse is highly probable also.
1344   lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
1345   ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
1346   add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
1347   add(G3_scratch, O5, G3_scratch);
1348   cmp(Lbcp, G3_scratch);
1349   brx(Assembler::equal, false, Assembler::pt, verify_continue);
1350 
1351   Register temp_reg = O5;
1352   delayed()->mov(ImethodDataPtr, temp_reg);
1353   // %%% should use call_VM_leaf here?


1359   save_thread(L7_thread_cache);
1360   call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
1361   delayed()->nop();
1362   restore_thread(L7_thread_cache);
1363   ldf(FloatRegisterImpl::D, d_save, Ftos_d);
1364   restore();
1365   bind(verify_continue);
1366 #endif // ASSERT
1367 }
1368 
1369 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
1370                                                                 Register Rtmp,
1371                                                                 Label &profile_continue) {
1372   assert(ProfileInterpreter, "must be profiling interpreter");
1373   // Control will flow to "profile_continue" if the counter is less than the
1374   // limit or if we call profile_method()
1375 
1376   Label done;
1377 
1378   // if no method data exists, and the counter is high enough, make one
1379 #ifdef _LP64
1380   bpr(Assembler::rc_nz, false, Assembler::pn, ImethodDataPtr, done);
1381 #else
1382   tst(ImethodDataPtr);
1383   br(Assembler::notZero, false, Assembler::pn, done);
1384 #endif
1385 
1386   // Test to see if we should create a method data oop
1387   AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
1388 #ifdef _LP64
1389   delayed()->nop();
1390   sethi(profile_limit, Rtmp);
1391 #else
1392   delayed()->sethi(profile_limit, Rtmp);
1393 #endif
1394   ld(Rtmp, profile_limit.low10(), Rtmp);
1395   cmp(invocation_count, Rtmp);
1396   br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
1397   delayed()->nop();
1398 
1399   // Build it now.
1400   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1401   set_method_data_pointer_for_bcp();
1402   ba(false, profile_continue);
1403   delayed()->nop();
1404   bind(done);
1405 }
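
The gate above reduces to a small predicate; a self-contained sketch (mdo and profile_limit are plain stand-ins for ImethodDataPtr and InterpreterProfileLimit):

    // Model of the mdo-creation gate: build method data only when none exists
    // and the invocation count has reached the interpreter profile limit.
    bool should_create_method_data(const void* mdo, unsigned invocation_count,
                                   unsigned profile_limit) {
      if (mdo != 0) return false;                  // method data already exists: done
      return invocation_count >= profile_limit;    // lessUnsigned falls into profile_continue
    }
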
1406 
1407 // Store a value at some constant offset from the method data pointer.
1408 
1409 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
1410   assert(ProfileInterpreter, "must be profiling interpreter");
1411   st_ptr(value, ImethodDataPtr, constant);
1412 }
1413 
1414 void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
1415                                                       Register bumped_count,
1416                                                       bool decrement) {
1417   assert(ProfileInterpreter, "must be profiling interpreter");
1418 
1419   // Load the counter.
1420   ld_ptr(counter, bumped_count);
1421 
1422   if (decrement) {
1423     // Decrement the register.  Set condition codes.


1615     bind (profile_continue);
1616   }
1617 }
1618 
1619 
1620 // Count a virtual call in the bytecodes.
1621 
1622 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1623                                                      Register scratch,
1624                                                      bool receiver_can_be_null) {
1625   if (ProfileInterpreter) {
1626     Label profile_continue;
1627 
1628     // If no method data exists, go to profile_continue.
1629     test_method_data_pointer(profile_continue);
1630 
1631 
1632     Label skip_receiver_profile;
1633     if (receiver_can_be_null) {
1634       Label not_null;
1635       tst(receiver);
1636       brx(Assembler::notZero, false, Assembler::pt, not_null);
1637       delayed()->nop();
1638       // We are making a call.  Increment the count for null receiver.
1639       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1640       ba(false, skip_receiver_profile);
1641       delayed()->nop();
1642       bind(not_null);
1643     }
1644 
1645     // Record the receiver type.
1646     record_klass_in_profile(receiver, scratch, true);
1647     bind(skip_receiver_profile);
1648 
1649     // The method data pointer needs to be updated to reflect the new target.
1650     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1651     bind (profile_continue);
1652   }
1653 }
1654 
1655 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1656                                         Register receiver, Register scratch,
1657                                         int start_row, Label& done, bool is_virtual_call) {
1658   if (TypeProfileWidth == 0) {
1659     if (is_virtual_call) {
1660       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1661     }


1665   int last_row = VirtualCallData::row_limit() - 1;
1666   assert(start_row <= last_row, "must be work left to do");
1667   // Test this row for both the receiver and for null.
1668   // Take any of three different outcomes:
1669   //   1. found receiver => increment count and goto done
1670   //   2. found null => keep looking for case 1, maybe allocate this cell
1671   //   3. found something else => keep looking for cases 1 and 2
1672   // Case 3 is handled by a recursive call.
1673   for (int row = start_row; row <= last_row; row++) {
1674     Label next_test;
1675     bool test_for_null_also = (row == start_row);
1676 
1677     // See if the receiver is receiver[n].
1678     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1679     test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
1680     // delayed()->tst(scratch);
1681 
1682     // The receiver is receiver[n].  Increment count[n].
1683     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1684     increment_mdp_data_at(count_offset, scratch);
1685     ba(false, done);
1686     delayed()->nop();
1687     bind(next_test);
1688 
1689     if (test_for_null_also) {
1690       Label found_null;
1691       // Failed the equality check on receiver[n]...  Test for null.
1692       if (start_row == last_row) {
1693         // The only thing left to do is handle the null case.
1694         if (is_virtual_call) {
1695           brx(Assembler::zero, false, Assembler::pn, found_null);
1696           delayed()->nop();
1697           // Receiver did not match any saved receiver and there is no empty row for it.
1698           // Increment total counter to indicate polymorphic case.
1699           increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1700           ba(false, done);
1701           delayed()->nop();
1702           bind(found_null);
1703         } else {
1704           brx(Assembler::notZero, false, Assembler::pt, done);
1705           delayed()->nop();
1706         }
1707         break;
1708       }
1709       // Since null is rare, make it the branch-taken case.
1710       brx(Assembler::zero, false, Assembler::pn, found_null);
1711       delayed()->nop();
1712 
1713       // Put all the "Case 3" tests here.
1714       record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
1715 
1716       // Found a null.  Keep searching for a matching receiver,
1717       // but remember that this is an empty (unused) slot.
1718       bind(found_null);
1719     }
1720   }
1721 
1722   // In the fall-through case, we found no matching receiver, but we
1723   // observed that receiver[start_row] is NULL.
1724 
1725   // Fill in the receiver field and increment the count.
1726   int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1727   set_mdp_data_at(recvr_offset, receiver);
1728   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1729   mov(DataLayout::counter_increment, scratch);
1730   set_mdp_data_at(count_offset, scratch);
1731   if (start_row > 0) {
1732     ba(false, done);
1733     delayed()->nop();
1734   }
1735 }
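
Unrolled recursion aside, the row protocol reduces to one linear scan. A self-contained C++ model of the three outcomes listed above (the row layout is an illustrative stand-in for VirtualCallData):

    #include <cstddef>

    // Model of the receiver-type rows: a receiver slot and a count per row.
    struct ReceiverRow { const void* receiver; long count; };

    void record_receiver_model(ReceiverRow* rows, int nrows, const void* receiver,
                               long* polymorphic_count, bool is_virtual_call) {
      int first_empty = -1;
      for (int row = 0; row < nrows; row++) {
        if (rows[row].receiver == receiver) { rows[row].count++; return; }  // case 1
        if (rows[row].receiver == NULL && first_empty < 0)
          first_empty = row;                      // case 2: remember, keep looking
        // case 3: some other klass; keep scanning
      }
      if (first_empty >= 0) {                     // no match anywhere: claim the cell
        rows[first_empty].receiver = receiver;
        rows[first_empty].count = 1;              // DataLayout::counter_increment
      } else if (is_virtual_call) {
        (*polymorphic_count)++;                   // megamorphic: bump shared counter
      }
    }
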
1736 
1737 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1738                                                         Register scratch, bool is_virtual_call) {
1739   assert(ProfileInterpreter, "must be profiling");
1740   Label done;
1741 
1742   record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
1743 
1744   bind (done);
1745 }
1746 
1747 
1748 // Count a ret in the bytecodes.
1749 
1750 void InterpreterMacroAssembler::profile_ret(TosState state,
1751                                             Register return_bci,
1752                                             Register scratch) {
1753   if (ProfileInterpreter) {


1755     uint row;
1756 
1757     // If no method data exists, go to profile_continue.
1758     test_method_data_pointer(profile_continue);
1759 
1760     // Update the total ret count.
1761     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1762 
1763     for (row = 0; row < RetData::row_limit(); row++) {
1764       Label next_test;
1765 
1766       // See if return_bci is equal to bci[n]:
1767       test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
1768                        return_bci, next_test, scratch);
1769 
1770       // return_bci is equal to bci[n].  Increment the count.
1771       increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);
1772 
1773       // The method data pointer needs to be updated to reflect the new target.
1774       update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
1775       ba(false, profile_continue);
1776       delayed()->nop();
1777       bind(next_test);
1778     }
1779 
1780     update_mdp_for_ret(state, return_bci);
1781 
1782     bind (profile_continue);
1783   }
1784 }
1785 
1786 // Profile an unexpected null in the bytecodes.
1787 void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
1788   if (ProfileInterpreter) {
1789     Label profile_continue;
1790 
1791     // If no method data exists, go to profile_continue.
1792     test_method_data_pointer(profile_continue);
1793 
1794     set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);
1795 
1796     // The method data pointer needs to be updated.


1905                                                       Register Rtemp,
1906                                                       Register Rtemp2 ) {
1907 
1908   Register Rlimit = Lmonitors;
1909   const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1910   assert( (delta & LongAlignmentMask) == 0,
1911           "sizeof BasicObjectLock must be even number of doublewords");
1912 
1913   sub( SP,        delta, SP);
1914   sub( Lesp,      delta, Lesp);
1915   sub( Lmonitors, delta, Lmonitors);
1916 
1917   if (!stack_is_empty) {
1918 
1919     // must copy stack contents down
1920 
1921     Label start_copying, next;
1922 
1923     // untested("monitor stack expansion");
1924     compute_stack_base(Rtemp);
1925     ba( false, start_copying );
1926     delayed()->cmp( Rtemp, Rlimit); // done? duplicated below
1927 
1928     // note: must copy from low memory upwards
1929     // On entry to loop,
1930     // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
1931     // Loop mutates Rtemp
1932 
1933     bind( next);
1934 
1935     st_ptr(Rtemp2, Rtemp, 0);
1936     inc(Rtemp, wordSize);
1937     cmp(Rtemp, Rlimit); // are we done? (duplicated above)
1938 
1939     bind( start_copying );
1940 
1941     brx( notEqual, true, pn, next );
1942     delayed()->ld_ptr( Rtemp, delta, Rtemp2 );
1943 
1944     // done copying stack
1945   }
1946 }
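
The copy loop moves each word down by delta while walking addresses upwards, which keeps the overlapping source and destination ranges safe. A self-contained sketch:

    #include <cstddef>
    #include <cstdint>

    // Model of the monitor-expansion copy: every expression-stack word moves
    // from old address p + delta_words to new address p, low memory upwards.
    void copy_stack_down(uintptr_t* new_base, uintptr_t* limit, size_t delta_words) {
      for (uintptr_t* p = new_base; p != limit; p++) {
        p[0] = p[delta_words];  // ld_ptr(Rtemp, delta, Rtemp2); st_ptr(Rtemp2, Rtemp, 0)
      }
    }
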


1993   assert_not_delayed();
1994   sll(index, Interpreter::logStackElementSize, index);
1995   sub(Llocals, index, index);
1996   load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
1997 }
1998 
1999 
2000 #ifdef ASSERT
2001 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
2002   Label L;
2003 
2004   assert(Rindex != Rscratch, "Registers cannot be same");
2005   assert(Rindex != Rscratch1, "Registers cannot be same");
2006   assert(Rlimit != Rscratch, "Registers cannot be same");
2007   assert(Rlimit != Rscratch1, "Registers cannot be same");
2008   assert(Rscratch1 != Rscratch, "Registers cannot be same");
2009 
2010   // untested("reg area corruption");
2011   add(Rindex, offset, Rscratch);
2012   add(Rlimit, 64 + STACK_BIAS, Rscratch1);
2013   cmp(Rscratch, Rscratch1);
2014   brx(Assembler::greaterEqualUnsigned, false, pn, L);
2015   delayed()->nop();
2016   stop("regsave area is being clobbered");
2017   bind(L);
2018 }
2019 #endif // ASSERT
2020 
2021 
2022 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
2023   assert_not_delayed();
2024   sll(index, Interpreter::logStackElementSize, index);
2025   sub(Llocals, index, index);
2026   debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
2027   st(src, index, 0);
2028 }
2029 
2030 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
2031   assert_not_delayed();
2032   sll(index, Interpreter::logStackElementSize, index);
2033   sub(Llocals, index, index);
2034 #ifdef ASSERT
2035   check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);


2157   // and store the result to memory
2158   st( Rtmp, be_counter );
2159 
2160   // Add backedge + invocation counter
2161   add( Rtmp, Rtmp2, Rtmp );
2162 
2163   // Note that this macro must leave backedge_count + invocation_count in Rtmp!
2164 }
2165 
2166 #ifndef CC_INTERP
2167 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
2168                                                              Register branch_bcp,
2169                                                              Register Rtmp ) {
2170   Label did_not_overflow;
2171   Label overflow_with_error;
2172   assert_different_registers(backedge_count, Rtmp, branch_bcp);
2173   assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
2174 
2175   AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
2176   load_contents(limit, Rtmp);
2177   cmp(backedge_count, Rtmp);
2178   br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow);
2179   delayed()->nop();
2180 
2181   // When ProfileInterpreter is on, the backedge_count comes from the
2182   // methodDataOop, whose value does not get reset on the call to
2183   // frequency_counter_overflow().  To avoid excessive calls to the overflow
2184   // routine while the method is being compiled, add a second test to make sure
2185   // the overflow function is called only once every overflow_frequency.
2186   if (ProfileInterpreter) {
2187     const int overflow_frequency = 1024;
2188     andcc(backedge_count, overflow_frequency-1, Rtmp);
2189     brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
2190     delayed()->nop();
2191   }
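
The throttle in that ProfileInterpreter branch is just a power-of-two modulus; a standalone check:

    // Model of the overflow throttle: with the mdo-backed counter never reset,
    // call frequency_counter_overflow only once per 1024 backedges.
    bool should_call_overflow(unsigned backedge_count) {
      const unsigned overflow_frequency = 1024;                // a power of two
      return (backedge_count & (overflow_frequency - 1)) == 0; // andcc(..., 1023, ...)
    }
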
2192 
2193   // overflow in loop, pass branch bytecode
2194   set(6,Rtmp);
2195   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
2196 
2197   // Was an OSR adapter generated?
2198   // O0 = osr nmethod
2199   tst(O0);
2200   brx(Assembler::zero, false, Assembler::pn, overflow_with_error);
2201   delayed()->nop();
2202 
2203   // Has the nmethod been invalidated already?
2204   ld(O0, nmethod::entry_bci_offset(), O2);
2205   cmp(O2, InvalidOSREntryBci);
2206   br(Assembler::equal, false, Assembler::pn, overflow_with_error);
2207   delayed()->nop();
2208 
2209   // migrate the interpreter frame off of the stack
2210 
2211   mov(G2_thread, L7);
2212   // save nmethod
2213   mov(O0, L6);
2214   set_last_Java_frame(SP, noreg);
2215   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
2216   reset_last_Java_frame();
2217   mov(L7, G2_thread);
2218 
2219   // move OSR nmethod to I1
2220   mov(L6, I1);
2221 
2222   // OSR buffer to I0
2223   mov(O0, I0);
2224 
2225   // remove the interpreter frame
2226   restore(I5_savedSP, 0, SP);
2227 


2253   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2254   if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())    return true;
2255   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2256   if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())    return true;
2257 #endif // PRODUCT
2258   return false;
2259 }
2260 
2261 
2262 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2263   if (!VerifyOops)  return;
2264   // the VM documentation for the astore[_wide] bytecode allows
2265   // the TOS to be not only an oop but also a return address
2266   Label test;
2267   Label skip;
2268   // See if it is an address (in the current method):
2269 
2270   mov(reg, Rtmp);
2271   const int log2_bytecode_size_limit = 16;
2272   srl(Rtmp, log2_bytecode_size_limit, Rtmp);
2273   br_notnull( Rtmp, false, pt, test );
2274   delayed()->nop();
2275 
2276   // %%% should use call_VM_leaf here?
2277   save_frame_and_mov(0, Lmethod, O0, reg, O1);
2278   save_thread(L7_thread_cache);
2279   call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
2280   delayed()->nop();
2281   restore_thread(L7_thread_cache);
2282   br_notnull( O0, false, pt, skip );
2283   delayed()->restore();
2284 
2285   // Perform a more elaborate out-of-line call
2286   // Not an address; verify it:
2287   bind(test);
2288   verify_oop(reg);
2289   bind(skip);
2290 }
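
The discrimination above is a simple range heuristic; a self-contained sketch (on 64-bit the real code's srl is a 32-bit shift, glossed over here):

    #include <cstdint>

    // Model of the TOS test: values that fit in 16 bits are treated as
    // candidate return addresses (checked by verify_return_address); anything
    // larger is verified as an oop.
    bool looks_like_return_address(uintptr_t tos_value) {
      const int log2_bytecode_size_limit = 16;
      return (tos_value >> log2_bytecode_size_limit) == 0;  // srl + br_notnull
    }
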
2291 
2292 
2293 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2294   if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);


2303 // if (DTraceMethodProbes) {
2304 //   SharedRuntime::dtrace_method_entry(method, receiver);
2305 // }
2306 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2307 //   SharedRuntime::rc_trace_method_entry(method, receiver);
2308 // }
2309 
2310 void InterpreterMacroAssembler::notify_method_entry() {
2311 
2312   // C++ interpreter only uses this for native methods.
2313 
2314   // Whenever JVMTI puts a thread in interp_only_mode, method
2315   // entry/exit events are sent for that thread to track stack
2316   // depth.  If it is possible to enter interp_only_mode we add
2317   // the code to check if the event should be sent.
2318   if (JvmtiExport::can_post_interpreter_events()) {
2319     Label L;
2320     Register temp_reg = O5;
2321     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2322     ld(interp_only, temp_reg);
2323     tst(temp_reg);
2324     br(zero, false, pt, L);
2325     delayed()->nop();
2326     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2327     bind(L);
2328   }
2329 
2330   {
2331     Register temp_reg = O5;
2332     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2333     call_VM_leaf(noreg,
2334       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2335       G2_thread, Lmethod);
2336   }
2337 
2338   // RedefineClasses() tracing support for obsolete method entry
2339   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2340     call_VM_leaf(noreg,
2341       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2342       G2_thread, Lmethod);
2343   }
2344 }
2345 


2355 //   SharedRuntime::dtrace_method_exit(thread, method);
2356 // }
2357 //
2358 // Native methods have their result stored in d_tmp and l_tmp
2359 // Java methods have their result stored in the expression stack
2360 
2361 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2362                                                    TosState state,
2363                                                    NotifyMethodExitMode mode) {
2364   // C++ interpreter only uses this for native methods.
2365 
2366   // Whenever JVMTI puts a thread in interp_only_mode, method
2367   // entry/exit events are sent for that thread to track stack
2368   // depth.  If it is possible to enter interp_only_mode we add
2369   // the code to check if the event should be sent.
2370   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2371     Label L;
2372     Register temp_reg = O5;
2373     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2374     ld(interp_only, temp_reg);
2375     tst(temp_reg);
2376     br(zero, false, pt, L);
2377     delayed()->nop();
2378 
2379     // Note: frame::interpreter_frame_result has a dependency on how the
2380     // method result is saved across the call to post_method_exit. For
2381     // native methods it assumes the result registers are saved to
2382     // l_scratch and d_scratch. If this changes then the interpreter_frame_result
2383     // implementation will need to be updated too.
2384 
2385     save_return_value(state, is_native_method);
2386     call_VM(noreg,
2387             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
2388     restore_return_value(state, is_native_method);
2389     bind(L);
2390   }
2391 
2392   {
2393     Register temp_reg = O5;
2394     // Dtrace notification
2395     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2396     save_return_value(state, is_native_method);
2397     call_VM_leaf(




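Below is the new side of the sdiff. The recurring rewrite is mechanical: explicit compare/branch pairs trailed by a wasted delayed()->nop() become the short-branch helpers this change introduces (cmp_and_br_short, br_null_short, br_notnull_short, ba_short), which can emit a T4 cbcond compare-and-branch with no delay slot. The shape of the transformation, shown on the earlyret check from above (fragment only, not standalone code):

    // Before: three macro-assembler calls and a dead delay slot.
    cmp(G4_scratch, JvmtiThreadState::earlyret_pending);
    br(Assembler::notEqual, false, pt, L);
    delayed()->nop();

    // After: one helper; on T4 this can become a single cbcond, no delay slot.
    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending,
                     Assembler::notEqual, pt, L);
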
 219   case stos:                                           // fall through
 220   case itos: ld(val_addr, Otos_l1);                       break;
 221   case ftos: ldf(FloatRegisterImpl::S, val_addr, Ftos_f); break;
 222   case dtos: ldf(FloatRegisterImpl::D, val_addr, Ftos_d); break;
 223   case vtos: /* nothing to do */                          break;
 224   default  : ShouldNotReachHere();
 225   }
 226   // Clean up tos value in the jvmti thread state
 227   or3(G0, ilgl, G3_scratch);
 228   stw(G3_scratch, tos_addr);
 229   st_long(G0, val_addr);
 230   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 231 }
 232 
 233 
 234 void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
 235   if (JvmtiExport::can_force_early_return()) {
 236     Label L;
 237     Register thr_state = G3_scratch;
 238     ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
 239     br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;


 240 
 241     // Initiate earlyret handling only if it is not already being processed.
 242     // If the flag has the earlyret_processing bit set, it means that this code
 243     // is called *during* earlyret handling - we don't want to reenter.
 244     ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
 245     cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);


 246 
 247     // Call Interpreter::remove_activation_early_entry() to get the address of the
 248     // same-named entrypoint in the generated interpreter code
 249     ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
 250     call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
 251 
 252     // Jump to Interpreter::_remove_activation_early_entry
 253     jmpl(O0, G0, G0);
 254     delayed()->nop();
 255     bind(L);
 256   }
 257 }
 258 
 259 
 260 void InterpreterMacroAssembler::super_call_VM_leaf(Register thread_cache, address entry_point, Register arg_1, Register arg_2) {
 261   mov(arg_1, O0);
 262   mov(arg_2, O1);
 263   MacroAssembler::call_VM_leaf_base(thread_cache, entry_point, 2);
 264 }
 265 #endif /* CC_INTERP */


 545 
 546 
 547 #ifdef ASSERT
 548 void InterpreterMacroAssembler::verify_sp(Register Rsp, Register Rtemp) {
 549   Label Bad, OK;
 550 
 551   // Saved SP must be aligned.
 552 #ifdef _LP64
 553   btst(2*BytesPerWord-1, Rsp);
 554 #else
 555   btst(LongAlignmentMask, Rsp);
 556 #endif
 557   br(Assembler::notZero, false, Assembler::pn, Bad);
 558   delayed()->nop();
 559 
 560   // Saved SP, plus register window size, must not be above FP.
 561   add(Rsp, frame::register_save_words * wordSize, Rtemp);
 562 #ifdef _LP64
 563   sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
 564 #endif
 565   cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);


 566 
 567   // Saved SP must not be ridiculously below current SP.
 568   size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
 569   set(maxstack, Rtemp);
 570   sub(SP, Rtemp, Rtemp);
 571 #ifdef _LP64
 572   add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
 573 #endif
 574   cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);


 575 
 576   ba_short(OK);

 577 
 578   bind(Bad);
 579   stop("on return to interpreted call, restored SP is corrupted");
 580 
 581   bind(OK);
 582 }
 583 
 584 
 585 void InterpreterMacroAssembler::verify_esp(Register Resp) {
 586   // about to read or write Resp[0]
 587   // make sure it is not in the monitors or the register save area
 588   Label OK1, OK2;
 589 
 590   cmp(Resp, Lmonitors);
 591   brx(Assembler::lessUnsigned, true, Assembler::pt, OK1);
 592   delayed()->sub(Resp, frame::memory_parameter_word_sp_offset * wordSize, Resp);
 593   stop("too many pops:  Lesp points into monitor area");
 594   bind(OK1);
 595 #ifdef _LP64
 596   sub(Resp, STACK_BIAS, Resp);


 604 #endif // ASSERT
 605 
 606 // Load compiled (i2c) or interpreter entry when calling from interpreted and
 607 // do the call. Centralized so that all interpreter calls will do the same actions.
 608 // If jvmti single stepping is on for a thread we must not call compiled code.
 609 void InterpreterMacroAssembler::call_from_interpreter(Register target, Register scratch, Register Rret) {
 610 
 611   // Assume we want to go compiled if available
 612 
 613   ld_ptr(G5_method, in_bytes(methodOopDesc::from_interpreted_offset()), target);
 614 
 615   if (JvmtiExport::can_post_interpreter_events()) {
 616     // JVMTI events, such as single-stepping, are implemented partly by avoiding running
 617     // compiled code in threads for which the event is enabled.  Check here for
 618     // interp_only_mode if these events CAN be enabled.
 619     verify_thread();
 620     Label skip_compiled_code;
 621 
 622     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
 623     ld(interp_only, scratch);
 624     cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);

 625     delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
 626     bind(skip_compiled_code);
 627   }
 628 
 629   // the i2c_adapters need methodOop in G5_method (right? %%%)
 630   // do the call
 631 #ifdef ASSERT
 632   {
 633     Label ok;
 634     br_notnull_short(target, Assembler::pt, ok);

 635     stop("null entry point");
 636     bind(ok);
 637   }
 638 #endif // ASSERT
 639 
 640   // Adjust Rret first so Llast_SP can be same as Rret
 641   add(Rret, -frame::pc_return_offset, O7);
 642   add(Lesp, BytesPerWord, Gargs); // setup parameter pointer
 643   // Record SP so we can remove any stack space allocated by adapter transition
 644   jmp(target, 0);
 645   delayed()->mov(SP, Llast_SP);
 646 }
 647 
 648 void InterpreterMacroAssembler::if_cmp(Condition cc, bool ptr_compare) {
 649   assert_not_delayed();
 650 
 651   Label not_taken;
 652   if (ptr_compare) brx(cc, false, Assembler::pn, not_taken);
 653   else             br (cc, false, Assembler::pn, not_taken);
 654   delayed()->nop();


 954                                                               bool install_monitor_exception) {
 955   Label unlocked, unlock, no_unlock;
 956 
 957   // get the value of _do_not_unlock_if_synchronized into G1_scratch
 958   const Address do_not_unlock_if_synchronized(G2_thread,
 959     JavaThread::do_not_unlock_if_synchronized_offset());
 960   ldbool(do_not_unlock_if_synchronized, G1_scratch);
 961   stbool(G0, do_not_unlock_if_synchronized); // reset the flag
 962 
 963   // check if synchronized method
 964   const Address access_flags(Lmethod, methodOopDesc::access_flags_offset());
 965   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
 966   push(state); // save tos
 967   ld(access_flags, G3_scratch); // Load access flags.
 968   btst(JVM_ACC_SYNCHRONIZED, G3_scratch);
 969   br(zero, false, pt, unlocked);
 970   delayed()->nop();
 971 
 972   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
 973   // is set.
 974   cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);

 975   delayed()->nop();
 976 
 977   // BasicObjectLock will be first in list, since this is a synchronized method. However, need
 978   // to check that the object has not been unlocked by an explicit monitorexit bytecode.
 979 
 980   // Intel: if (throw_monitor_exception) ... else ...
 981   // Entry already unlocked, need to throw exception
 982   //...
 983 
 984   // pass top-most monitor elem
 985   add( top_most_monitor(), O1 );
 986 
 987   ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
 988   br_notnull_short(G3_scratch, pt, unlock);

 989 
 990   if (throw_monitor_exception) {
 991     // Entry already unlocked, need to throw an exception
 992     MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
 993     should_not_reach_here();
 994   } else {
 995     // Monitor already unlocked during a stack unroll.
 996     // If requested, install an illegal_monitor_state_exception.
 997     // Continue with stack unrolling.
 998     if (install_monitor_exception) {
 999       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1000     }
1001     ba_short(unlocked);

1002   }
1003 
1004   bind(unlock);
1005 
1006   unlock_object(O1);
1007 
1008   bind(unlocked);
1009 
1010   // I0, I1: Might contain return value
1011 
1012   // Check that all monitors are unlocked
1013   { Label loop, exception, entry, restart;
1014 
1015     Register Rmptr   = O0;
1016     Register Rtemp   = O1;
1017     Register Rlimit  = Lmonitors;
1018     const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1019     assert( (delta & LongAlignmentMask) == 0,
1020             "sizeof BasicObjectLock must be even number of doublewords");
1021 
1022     #ifdef ASSERT
1023     add(top_most_monitor(), Rmptr, delta);
1024     { Label L;
1025       // ensure that Rmptr starts out above (or at) Rlimit
1026       cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);


1027       stop("monitor stack has negative size");
1028       bind(L);
1029     }
1030     #endif
1031     bind(restart);
1032     ba(entry);
1033     delayed()->
1034     add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry
1035 
1036     // Entry is still locked, need to throw exception
1037     bind(exception);
1038     if (throw_monitor_exception) {
1039       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
1040       should_not_reach_here();
1041     } else {
1042       // Stack unrolling. Unlock object and if requested, install illegal_monitor_exception.
1043       // Unlock does not block, so don't have to worry about the frame
1044       unlock_object(Rmptr);
1045       if (install_monitor_exception) {
1046         MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
1047       }
1048       ba_short(restart);

1049     }
1050 
1051     bind(loop);
1052     cmp(Rtemp, G0);                             // check if current entry is used
1053     brx(Assembler::notEqual, false, pn, exception);
1054     delayed()->
1055     dec(Rmptr, delta);                          // otherwise advance to next entry
1056     #ifdef ASSERT
1057     { Label L;
1058       // ensure that Rmptr has not somehow stepped below Rlimit
1059       cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);


1060       stop("ran off the end of the monitor stack");
1061       bind(L);
1062     }
1063     #endif
1064     bind(entry);
1065     cmp(Rmptr, Rlimit);                         // check if bottom reached
1066     brx(Assembler::notEqual, true, pn, loop);   // if not at bottom then check this entry
1067     delayed()->
1068     ld_ptr(Rmptr, BasicObjectLock::obj_offset_in_bytes() - delta, Rtemp);
1069   }
1070 
1071   bind(no_unlock);
1072   pop(state);
1073   interp_verify_oop(Otos_i, state, __FILE__, __LINE__);
1074 }
1075 
1076 
1077 // remove activation
1078 //
1079 // Unlock the receiver if this is a synchronized method.


1160     ld_ptr(mark_addr, mark_reg);
1161 
1162     if (UseBiasedLocking) {
1163       biased_locking_enter(obj_reg, mark_reg, temp_reg, done, &slow_case);
1164     }
1165 
1166     // get the address of basicLock on stack that will be stored in the object
1167     // we need a temporary register here as we do not want to clobber lock_reg
1168     // (cas clobbers the destination register)
1169     mov(lock_reg, temp_reg);
1170     // set mark reg to be (markOop of object | UNLOCK_VALUE)
1171     or3(mark_reg, markOopDesc::unlocked_value, mark_reg);
1172     // initialize the box  (Must happen before we update the object mark!)
1173     st_ptr(mark_reg, lock_addr, BasicLock::displaced_header_offset_in_bytes());
1174     // compare and exchange object_addr, markOop | 1, stack address of basicLock
1175     assert(mark_addr.disp() == 0, "cas must take a zero displacement");
1176     casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
1177       (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
1178 
1179     // if the compare and exchange succeeded we are done (we saw an unlocked object)
1180     cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);


1181 
1182     // We did not see an unlocked object so try the fast recursive case
1183 
1184     // Check if owner is self by comparing the value in the markOop of object
1185     // with the stack pointer
1186     sub(temp_reg, SP, temp_reg);
1187 #ifdef _LP64
1188     sub(temp_reg, STACK_BIAS, temp_reg);
1189 #endif
1190     assert(os::vm_page_size() > 0xfff, "page size too small - change the constant");
1191 
1192     // Composite "andcc" test:
1193     // (a) %sp -vs- markword proximity check, and,
1194     // (b) verify mark word LSBs == 0 (Stack-locked).
1195     //
1196     // FFFFF003/FFFFFFFFFFFFF003 is (markOopDesc::lock_mask_in_place | -os::vm_page_size())
1197     // Note that the page size used for %sp proximity testing is arbitrary and is
1198     // unrelated to the actual MMU page size.  We use a 'logical' page size of
1199     // 4096 bytes.   F..FFF003 is designed to fit conveniently in the SIMM13 immediate
1200     // field of the andcc instruction.


1286 
1287 // Set the method data pointer for the current bcp.
1288 
1289 void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
1290   assert(ProfileInterpreter, "must be profiling interpreter");
1291   Label zero_continue;
1292 
1293   // Test MDO to avoid the call if it is NULL.
1294   ld_ptr(Lmethod, in_bytes(methodOopDesc::method_data_offset()), ImethodDataPtr);
1295   test_method_data_pointer(zero_continue);
1296   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), Lmethod, Lbcp);
1297   add(ImethodDataPtr, in_bytes(methodDataOopDesc::data_offset()), ImethodDataPtr);
1298   add(ImethodDataPtr, O0, ImethodDataPtr);
1299   bind(zero_continue);
1300 }
1301 
1302 // Test ImethodDataPtr.  If it is null, continue at the specified label
1303 
1304 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
1305   assert(ProfileInterpreter, "must be profiling interpreter");
1306   br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);






1307 }
1308 
1309 void InterpreterMacroAssembler::verify_method_data_pointer() {
1310   assert(ProfileInterpreter, "must be profiling interpreter");
1311 #ifdef ASSERT
1312   Label verify_continue;
1313   test_method_data_pointer(verify_continue);
1314 
1315   // If the mdp is valid, it will point to a DataLayout header which is
1316   // consistent with the bcp.  The converse is highly probable also.
1317   lduh(ImethodDataPtr, in_bytes(DataLayout::bci_offset()), G3_scratch);
1318   ld_ptr(Lmethod, methodOopDesc::const_offset(), O5);
1319   add(G3_scratch, in_bytes(constMethodOopDesc::codes_offset()), G3_scratch);
1320   add(G3_scratch, O5, G3_scratch);
1321   cmp(Lbcp, G3_scratch);
1322   brx(Assembler::equal, false, Assembler::pt, verify_continue);
1323 
1324   Register temp_reg = O5;
1325   delayed()->mov(ImethodDataPtr, temp_reg);
1326   // %%% should use call_VM_leaf here?


1332   save_thread(L7_thread_cache);
1333   call(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), relocInfo::none);
1334   delayed()->nop();
1335   restore_thread(L7_thread_cache);
1336   ldf(FloatRegisterImpl::D, d_save, Ftos_d);
1337   restore();
1338   bind(verify_continue);
1339 #endif // ASSERT
1340 }
1341 
1342 void InterpreterMacroAssembler::test_invocation_counter_for_mdp(Register invocation_count,
1343                                                                 Register Rtmp,
1344                                                                 Label &profile_continue) {
1345   assert(ProfileInterpreter, "must be profiling interpreter");
1346   // Control will flow to "profile_continue" if the counter is less than the
1347   // limit or if we call profile_method()
1348 
1349   Label done;
1350 
1351   // if no method data exists, and the counter is high enough, make one
1352   br_notnull_short(ImethodDataPtr, Assembler::pn, done);





1353 
1354   // Test to see if we should create a method data oop
1355   AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);


1356   sethi(profile_limit, Rtmp);



1357   ld(Rtmp, profile_limit.low10(), Rtmp);
1358   cmp_and_br_short(invocation_count, Rtmp, Assembler::lessUnsigned, Assembler::pn, profile_continue);


1359 
1360   // Build it now.
1361   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
1362   set_method_data_pointer_for_bcp();
1363   ba_short(profile_continue);

1364   bind(done);
1365 }
1366 
1367 // Store a value at some constant offset from the method data pointer.
1368 
1369 void InterpreterMacroAssembler::set_mdp_data_at(int constant, Register value) {
1370   assert(ProfileInterpreter, "must be profiling interpreter");
1371   st_ptr(value, ImethodDataPtr, constant);
1372 }
1373 
1374 void InterpreterMacroAssembler::increment_mdp_data_at(Address counter,
1375                                                       Register bumped_count,
1376                                                       bool decrement) {
1377   assert(ProfileInterpreter, "must be profiling interpreter");
1378 
1379   // Load the counter.
1380   ld_ptr(counter, bumped_count);
1381 
1382   if (decrement) {
1383     // Decrement the register.  Set condition codes.


1575     bind (profile_continue);
1576   }
1577 }
1578 
1579 
1580 // Count a virtual call in the bytecodes.
1581 
1582 void InterpreterMacroAssembler::profile_virtual_call(Register receiver,
1583                                                      Register scratch,
1584                                                      bool receiver_can_be_null) {
1585   if (ProfileInterpreter) {
1586     Label profile_continue;
1587 
1588     // If no method data exists, go to profile_continue.
1589     test_method_data_pointer(profile_continue);
1590 
1591 
1592     Label skip_receiver_profile;
1593     if (receiver_can_be_null) {
1594       Label not_null;
1595       br_notnull_short(receiver, Assembler::pt, not_null);


1596       // We are making a call.  Increment the count for null receiver.
1597       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1598       ba_short(skip_receiver_profile);

1599       bind(not_null);
1600     }
1601 
1602     // Record the receiver type.
1603     record_klass_in_profile(receiver, scratch, true);
1604     bind(skip_receiver_profile);
1605 
1606     // The method data pointer needs to be updated to reflect the new target.
1607     update_mdp_by_constant(in_bytes(VirtualCallData::virtual_call_data_size()));
1608     bind (profile_continue);
1609   }
1610 }
1611 
1612 void InterpreterMacroAssembler::record_klass_in_profile_helper(
1613                                         Register receiver, Register scratch,
1614                                         int start_row, Label& done, bool is_virtual_call) {
1615   if (TypeProfileWidth == 0) {
1616     if (is_virtual_call) {
1617       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1618     }
1619     return;
1620   }
1621 
1622   int last_row = VirtualCallData::row_limit() - 1;
1623   assert(start_row <= last_row, "must be work left to do");
1624   // Test this row for both the receiver and for null.
1625   // Take any of three different outcomes:
1626   //   1. found receiver => increment count and goto done
1627   //   2. found null => keep looking for case 1, maybe allocate this cell
1628   //   3. found something else => keep looking for cases 1 and 2
1629   // Case 3 is handled by a recursive call.
1630   for (int row = start_row; row <= last_row; row++) {
1631     Label next_test;
1632     bool test_for_null_also = (row == start_row);
1633 
1634     // See if the receiver is receiver[n].
1635     int recvr_offset = in_bytes(VirtualCallData::receiver_offset(row));
1636     test_mdp_data_at(recvr_offset, receiver, next_test, scratch);
1637     // delayed()->tst(scratch);
1638 
1639     // The receiver is receiver[n].  Increment count[n].
1640     int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
1641     increment_mdp_data_at(count_offset, scratch);
1642     ba_short(done);
1643     bind(next_test);
1644 
1645     if (test_for_null_also) {
1646       Label found_null;
1647       // Failed the equality check on receiver[n]...  Test for null.
1648       if (start_row == last_row) {
1649         // The only thing left to do is handle the null case.
1650         if (is_virtual_call) {
1651           brx(Assembler::zero, false, Assembler::pn, found_null);
1652           delayed()->nop();
1653           // Receiver did not match any saved receiver and there is no empty row for it.
1654           // Increment total counter to indicate polymorphic case.
1655           increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1656           ba_short(done);
1657           bind(found_null);
1658         } else {
1659           brx(Assembler::notZero, false, Assembler::pt, done);
1660           delayed()->nop();
1661         }
1662         break;
1663       }
1664       // Since null is rare, make it the branch-taken case.
1665       brx(Assembler::zero, false, Assembler::pn, found_null);
1666       delayed()->nop();
1667 
1668       // Put all the "Case 3" tests here.
1669       record_klass_in_profile_helper(receiver, scratch, start_row + 1, done, is_virtual_call);
1670 
1671       // Found a null.  Keep searching for a matching receiver,
1672       // but remember that this is an empty (unused) slot.
1673       bind(found_null);
1674     }
1675   }
1676 
1677   // In the fall-through case, we found no matching receiver, but we
1678   // observed that receiver[start_row] is NULL.
1679 
1680   // Fill in the receiver field and increment the count.
1681   int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
1682   set_mdp_data_at(recvr_offset, receiver);
1683   int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
1684   mov(DataLayout::counter_increment, scratch);
1685   set_mdp_data_at(count_offset, scratch);
1686   if (start_row > 0) {
1687     ba_short(done);
1688   }
1689 }
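// The unrolled scan emitted above corresponds roughly to this C loop (a
// sketch: the recursion supplies one unrolled iteration per row, and
// receiver_cell/count are illustrative accessors for the mdp cells):
//
//   for (int row = start_row; row <= last_row; row++) {
//     if (receiver_cell(row) == receiver) { count(row)++; goto done; }  // case 1
//     if (row == start_row && receiver_cell(row) == NULL) break;        // case 2
//   }
//   // fall through: no match, but receiver_cell(start_row) was empty -
//   // install the receiver there with an initial count.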
1690 
1691 void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
1692                                                         Register scratch, bool is_virtual_call) {
1693   assert(ProfileInterpreter, "must be profiling");
1694   Label done;
1695 
1696   record_klass_in_profile_helper(receiver, scratch, 0, done, is_virtual_call);
1697 
1698   bind (done);
1699 }
1700 
1701 
1702 // Count a ret in the bytecodes.
1703 
1704 void InterpreterMacroAssembler::profile_ret(TosState state,
1705                                             Register return_bci,
1706                                             Register scratch) {
1707   if (ProfileInterpreter) {
1708     Label profile_continue;
1709     uint row;
1710 
1711     // If no method data exists, go to profile_continue.
1712     test_method_data_pointer(profile_continue);
1713 
1714     // Update the total ret count.
1715     increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
1716 
1717     for (row = 0; row < RetData::row_limit(); row++) {
1718       Label next_test;
1719 
1720       // See if return_bci is equal to bci[n]:
1721       test_mdp_data_at(in_bytes(RetData::bci_offset(row)),
1722                        return_bci, next_test, scratch);
1723 
1724       // return_bci is equal to bci[n].  Increment the count.
1725       increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);
1726 
1727       // The method data pointer needs to be updated to reflect the new target.
1728       update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
1729       ba_short(profile_continue);
1730       bind(next_test);
1731     }
1732 
1733     update_mdp_for_ret(state, return_bci);
1734 
1735     bind (profile_continue);
1736   }
1737 }
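// In outline (a sketch; bci/bci_count/bci_displacement name the RetData
// cells reached through the mdp):
//
//   CounterData::count++;
//   for (uint row = 0; row < RetData::row_limit(); row++) {
//     if (bci(row) == return_bci) {
//       bci_count(row)++;
//       mdp += bci_displacement(row);      // retarget the method data pointer
//       goto profile_continue;
//     }
//   }
//   update_mdp_for_ret(state, return_bci); // no cached row matched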
1738 
1739 // Profile an unexpected null in the bytecodes.
1740 void InterpreterMacroAssembler::profile_null_seen(Register scratch) {
1741   if (ProfileInterpreter) {
1742     Label profile_continue;
1743 
1744     // If no method data exists, go to profile_continue.
1745     test_method_data_pointer(profile_continue);
1746 
1747     set_mdp_flag_at(BitData::null_seen_byte_constant(), scratch);
1748 
1749     // The method data pointer needs to be updated.


1858                                                       Register Rtemp,
1859                                                       Register Rtemp2 ) {
1860 
1861   Register Rlimit = Lmonitors;
1862   const jint delta = frame::interpreter_frame_monitor_size() * wordSize;
1863   assert( (delta & LongAlignmentMask) == 0,
1864           "sizeof BasicObjectLock must be even number of doublewords");
1865 
1866   sub( SP,        delta, SP);
1867   sub( Lesp,      delta, Lesp);
1868   sub( Lmonitors, delta, Lmonitors);
1869 
1870   if (!stack_is_empty) {
1871 
1872     // must copy stack contents down
1873 
1874     Label start_copying, next;
1875 
1876     // untested("monitor stack expansion");
1877     compute_stack_base(Rtemp);
1878     ba(start_copying);
1879     delayed()->cmp(Rtemp, Rlimit); // done? duplicated below
1880 
1881     // note: must copy from low memory upwards
1882     // On entry to loop,
1883     // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
1884     // Loop mutates Rtemp
1885 
1886     bind( next);
1887 
1888     st_ptr(Rtemp2, Rtemp, 0);
1889     inc(Rtemp, wordSize);
1890     cmp(Rtemp, Rlimit); // are we done? (duplicated above)
1891 
1892     bind( start_copying );
1893 
1894     brx( notEqual, true, pn, next );
1895     delayed()->ld_ptr( Rtemp, delta, Rtemp2 );
1896 
1897     // done copying stack
1898   }
1899 }
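// The loop above shifts every existing stack word down by 'delta' bytes,
// walking from low memory upwards; roughly:
//
//   for (intptr_t* p = new_stack_base; p != Lmonitors; p++)
//     *p = *(intptr_t*)((char*)p + delta);   // old slot sits delta bytes above
//
// The load sits in the annulled delay slot of the taken branch, so each
// iteration fetches the next word while the branch is resolved.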


1946   assert_not_delayed();
1947   sll(index, Interpreter::logStackElementSize, index);
1948   sub(Llocals, index, index);
1949   load_unaligned_double(index, Interpreter::local_offset_in_bytes(1), dst);
1950 }
1951 
1952 
1953 #ifdef ASSERT
1954 void InterpreterMacroAssembler::check_for_regarea_stomp(Register Rindex, int offset, Register Rlimit, Register Rscratch, Register Rscratch1) {
1955   Label L;
1956 
1957   assert(Rindex != Rscratch, "Registers cannot be same");
1958   assert(Rindex != Rscratch1, "Registers cannot be same");
1959   assert(Rlimit != Rscratch, "Registers cannot be same");
1960   assert(Rlimit != Rscratch1, "Registers cannot be same");
1961   assert(Rscratch1 != Rscratch, "Registers cannot be same");
1962 
1963   // untested("reg area corruption");
1964   add(Rindex, offset, Rscratch);
1965   add(Rlimit, 64 + STACK_BIAS, Rscratch1);
1966   cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
1967   stop("regsave area is being clobbered");
1968   bind(L);
1969 }
1970 #endif // ASSERT
1971 
1972 
1973 void InterpreterMacroAssembler::store_local_int( Register index, Register src ) {
1974   assert_not_delayed();
1975   sll(index, Interpreter::logStackElementSize, index);
1976   sub(Llocals, index, index);
1977   debug_only(check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);)
1978   st(src, index, 0);
1979 }
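// Locals are addressed downward from Llocals, so local slot n lives at
// Llocals - (n << Interpreter::logStackElementSize). As a sketch, the store
// above is:
//
//   *(jint*)(Llocals - (index << Interpreter::logStackElementSize)) = src;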
1980 
1981 void InterpreterMacroAssembler::store_local_ptr( Register index, Register src ) {
1982   assert_not_delayed();
1983   sll(index, Interpreter::logStackElementSize, index);
1984   sub(Llocals, index, index);
1985 #ifdef ASSERT
1986   check_for_regarea_stomp(index, 0, FP, G1_scratch, G4_scratch);


2108   // and store the result to memory
2109   st( Rtmp, be_counter );
2110 
2111   // Add backedge + invocation counter
2112   add( Rtmp, Rtmp2, Rtmp );
2113 
2114   // Note that this macro must leave backedge_count + invocation_count in Rtmp!
2115 }
2116 
2117 #ifndef CC_INTERP
2118 void InterpreterMacroAssembler::test_backedge_count_for_osr( Register backedge_count,
2119                                                              Register branch_bcp,
2120                                                              Register Rtmp ) {
2121   Label did_not_overflow;
2122   Label overflow_with_error;
2123   assert_different_registers(backedge_count, Rtmp, branch_bcp);
2124   assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");
2125 
2126   AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
2127   load_contents(limit, Rtmp);
2128   cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
2129 
2130   // When ProfileInterpreter is on, the backedge_count comes from the
2131   // methodDataOop, whose value does not get reset on the call to
2132   // frequency_counter_overflow().  To avoid excessive calls to the overflow
2133   // routine while the method is being compiled, add a second test to make sure
2134   // the overflow function is called only once every overflow_frequency.
2135   if (ProfileInterpreter) {
2136     const int overflow_frequency = 1024;
2137     andcc(backedge_count, overflow_frequency-1, Rtmp);
2138     brx(Assembler::notZero, false, Assembler::pt, did_not_overflow);
2139     delayed()->nop();
2140   }
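  // The andcc above computes backedge_count % overflow_frequency: the
  // frequency is a power of two, so masking with (frequency - 1) yields the
  // remainder in one instruction; a nonzero remainder skips the overflow call.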
2141 
2142   // overflow in loop, pass branch bytecode
2143   set(6,Rtmp);
2144   call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);
2145 
2146   // Was an OSR adapter generated?
2147   // O0 = osr nmethod
2148   br_null_short(O0, Assembler::pn, overflow_with_error);
2149 
2150   // Has the nmethod been invalidated already?
2151   ld(O0, nmethod::entry_bci_offset(), O2);
2152   cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);
2153 
2154   // migrate the interpreter frame off of the stack
2155 
2156   mov(G2_thread, L7);
2157   // save nmethod
2158   mov(O0, L6);
2159   set_last_Java_frame(SP, noreg);
2160   call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin), L7);
2161   reset_last_Java_frame();
2162   mov(L7, G2_thread);
2163 
2164   // move OSR nmethod to I1
2165   mov(L6, I1);
2166 
2167   // OSR buffer to I0
2168   mov(O0, I0);
2169 
2170   // remove the interpreter frame
2171   restore(I5_savedSP, 0, SP);
2172 
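// State at this point, summarizing the sequence above: OSR_migration_begin
// has packed the frame's locals and monitors into the OSR buffer (now in I0),
// the OSR nmethod is in I1, and the interpreter frame has been popped; the
// remainder of this routine (not shown in this hunk) enters the nmethod's
// OSR entry point.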


2198   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr);
2199   if (*jsr_pc == Bytecodes::_jsr   && jsr_pc >= m->code_base())    return true;
2200   jsr_pc = pc - Bytecodes::length_for(Bytecodes::_jsr_w);
2201   if (*jsr_pc == Bytecodes::_jsr_w && jsr_pc >= m->code_base())    return true;
2202 #endif // PRODUCT
2203   return false;
2204 }
2205 
2206 
2207 void InterpreterMacroAssembler::verify_oop_or_return_address(Register reg, Register Rtmp) {
2208   if (!VerifyOops)  return;
2209   // the VM documentation for the astore[_wide] bytecode allows
2210   // the TOS to be not only an oop but also a return address
2211   Label test;
2212   Label skip;
2213   // See if it is an address (in the current method):
2214 
2215   mov(reg, Rtmp);
2216   const int log2_bytecode_size_limit = 16;
2217   srl(Rtmp, log2_bytecode_size_limit, Rtmp);
2218   br_notnull_short( Rtmp, pt, test );
2219 
2220   // %%% should use call_VM_leaf here?
2221   save_frame_and_mov(0, Lmethod, O0, reg, O1);
2222   save_thread(L7_thread_cache);
2223   call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
2224   delayed()->nop();
2225   restore_thread(L7_thread_cache);
2226   br_notnull( O0, false, pt, skip );
2227   delayed()->restore();
2228 
2229   // Perform a more elaborate out-of-line call
2230   // Not an address; verify it:
2231   bind(test);
2232   verify_oop(reg);
2233   bind(skip);
2234 }
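// The discrimination above, as a sketch:
//
//   if ((reg >> log2_bytecode_size_limit) != 0) goto test;    // too large to be a bci
//   if (verify_return_address(Lmethod, reg) != 0) goto skip;  // a valid return address
//   test: verify_oop(reg);
//   skip: ;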
2235 
2236 
2237 void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
2238   if (state == ftos || state == dtos) MacroAssembler::verify_FPU(stack_depth);


2247 // if (DTraceMethodProbes) {
2248 //   SharedRuntime::dtrace_method_entry(method, receiver);
2249 // }
2250 // if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2251 //   SharedRuntime::rc_trace_method_entry(method, receiver);
2252 // }
2253 
2254 void InterpreterMacroAssembler::notify_method_entry() {
2255 
2256   // C++ interpreter only uses this for native methods.
2257 
2258   // Whenever JVMTI puts a thread in interp_only_mode, method
2259   // entry/exit events are sent for that thread to track stack
2260   // depth.  If it is possible to enter interp_only_mode we add
2261   // the code to check if the event should be sent.
2262   if (JvmtiExport::can_post_interpreter_events()) {
2263     Label L;
2264     Register temp_reg = O5;
2265     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2266     ld(interp_only, temp_reg);
2267     cmp_and_br_short(temp_reg, 0, equal, pt, L);
2268     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
2269     bind(L);
2270   }
2271 
2272   {
2273     Register temp_reg = O5;
2274     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2275     call_VM_leaf(noreg,
2276       CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_entry),
2277       G2_thread, Lmethod);
2278   }
2279 
2280   // RedefineClasses() tracing support for obsolete method entry
2281   if (RC_TRACE_IN_RANGE(0x00001000, 0x00002000)) {
2282     call_VM_leaf(noreg,
2283       CAST_FROM_FN_PTR(address, SharedRuntime::rc_trace_method_entry),
2284       G2_thread, Lmethod);
2285   }
2286 }
2287 


2297 //   SharedRuntime::dtrace_method_exit(thread, method);
2298 // }
2299 //
2300 // Native methods have their result stored in d_tmp and l_tmp
2301 // Java methods have their result stored in the expression stack
2302 
2303 void InterpreterMacroAssembler::notify_method_exit(bool is_native_method,
2304                                                    TosState state,
2305                                                    NotifyMethodExitMode mode) {
2306   // C++ interpreter only uses this for native methods.
2307 
2308   // Whenever JVMTI puts a thread in interp_only_mode, method
2309   // entry/exit events are sent for that thread to track stack
2310   // depth.  If it is possible to enter interp_only_mode we add
2311   // the code to check if the event should be sent.
2312   if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
2313     Label L;
2314     Register temp_reg = O5;
2315     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
2316     ld(interp_only, temp_reg);
2317     cmp_and_br_short(temp_reg, 0, equal, pt, L);
2318 
2319     // Note: frame::interpreter_frame_result has a dependency on how the
2320     // method result is saved across the call to post_method_exit. For
2321     // native methods it assumes the result registers are saved to
2322     // l_scratch and d_scratch. If this changes then the interpreter_frame_result
2323     // implementation will need to be updated too.
2324 
2325     save_return_value(state, is_native_method);
2326     call_VM(noreg,
2327             CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
2328     restore_return_value(state, is_native_method);
2329     bind(L);
2330   }
2331 
2332   {
2333     Register temp_reg = O5;
2334     // Dtrace notification
2335     SkipIfEqual skip_if(this, temp_reg, &DTraceMethodProbes, zero);
2336     save_return_value(state, is_native_method);
2337     call_VM_leaf(