src/cpu/sparc/vm/interp_masm_sparc.cpp
*** old/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Jul 15 18:49:39 2011
--- new/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Jul 15 18:49:39 2011

*** 234,254 **** --- 234,250 ----
  void InterpreterMacroAssembler::check_and_handle_earlyret(Register scratch_reg) {
    if (JvmtiExport::can_force_early_return()) {
      Label L;
      Register thr_state = G3_scratch;
      ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
-     tst(thr_state);
-     br(zero, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
-     delayed()->nop();
+     br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;

      // Initiate earlyret handling only if it is not already being processed.
      // If the flag has the earlyret_processing bit set, it means that this code
      // is called *during* earlyret handling - we don't want to reenter.
      ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
-     cmp(G4_scratch, JvmtiThreadState::earlyret_pending);
-     br(Assembler::notEqual, false, pt, L);
-     delayed()->nop();
+     cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);

      // Call Interpreter::remove_activation_early_entry() to get the address of the
      // same-named entrypoint in the generated interpreter code
      ld(thr_state, JvmtiThreadState::earlyret_tos_offset(), Otos_l1);
      call_VM_leaf(noreg, CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), Otos_l1);
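Every hunk in this webrev applies the same transformation: an explicit tst/cmp, a branch, and a delayed()->nop() collapse into one fused helper (br_null_short, cmp_and_br_short, ba_short, ...). As a minimal sketch of the idea (not HotSpot code; the real helpers and any cbcond fast path live in assembler_sparc.*, and the bodies below are assumptions), such a helper just layers over the primitive emits:

    // Toy emitter only: names mirror the diff, bodies are guesses that log
    // the would-be instructions instead of encoding real SPARC opcodes.
    #include <cstdio>

    struct Label { int id; };

    struct ToyAssembler {
      // The primitive three-step idiom the old call sites spelled out by hand.
      void cmp(const char* a, const char* b) { std::printf("  cmp  %s, %s\n", a, b); }
      void br(const char* cond, Label& L)    { std::printf("  b%s  L%d\n", cond, L.id); }
      void nop()                             { std::printf("  nop\n"); }

      // Fused helper: one call per compare-and-branch, so a call site can no
      // longer forget the delay-slot nop or mis-pair a cmp with its branch.
      void cmp_and_br_short(const char* a, const char* b, const char* cond, Label& L) {
        cmp(a, b);
        br(cond, L);
        nop();  // fill the delay slot; a cbcond-capable chip could fuse all three
      }
      void br_null_short(const char* reg, Label& L) {
        cmp_and_br_short(reg, "%g0", "e", L);  // null test = compare against %g0 (always zero)
      }
    };

    int main() {
      ToyAssembler a;
      Label L = {1};
      a.br_null_short("%o0", L);  // one call instead of tst/br/delayed()->nop()
      return 0;
    }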
*** 564,590 **** --- 560,581 ----
    // Saved SP, plus register window size, must not be above FP.
    add(Rsp, frame::register_save_words * wordSize, Rtemp);
  #ifdef _LP64
    sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
  #endif
-   cmp(Rtemp, FP);
-   brx(Assembler::greaterUnsigned, false, Assembler::pn, Bad);
-   delayed()->nop();
+   cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);

    // Saved SP must not be ridiculously below current SP.
    size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
    set(maxstack, Rtemp);
    sub(SP, Rtemp, Rtemp);
  #ifdef _LP64
    add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
  #endif
-   cmp(Rsp, Rtemp);
-   brx(Assembler::lessUnsigned, false, Assembler::pn, Bad);
-   delayed()->nop();
+   cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);

-   br(Assembler::always, false, Assembler::pn, OK);
-   delayed()->nop();
+   ba_short(OK);

    bind(Bad);
    stop("on return to interpreted call, restored SP is corrupted");

    bind(OK);
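For readers who do not think in SPARC, here is the same sanity check transcribed into plain C++. It is a sketch under stated assumptions: STACK_BIAS is the V9 stack bias of 2047, register_save_words models the 16-word register window save area, and both comparisons are unsigned, as in the emitted code.

    #include <cstdint>
    #include <cstddef>

    const uintptr_t STACK_BIAS = 2047;      // SPARC V9 stack bias (assumed here)
    const size_t register_save_words = 16;  // 8 locals + 8 ins per register window

    // True when a restored SP looks corrupt: its register save area would sit
    // above FP, or it lies implausibly far below the current SP.
    bool saved_sp_is_bad(uintptr_t Rsp, uintptr_t FP, uintptr_t SP, size_t maxstack) {
      uintptr_t temp = Rsp + register_save_words * sizeof(intptr_t);
      temp -= STACK_BIAS;                   // bias before cmp to the (biased) FP
      if (temp > FP) return true;           // greaterUnsigned -> Bad

      uintptr_t lowest = SP - maxstack;
      lowest += STACK_BIAS;                 // unbias before cmp to the (unbiased) Rsp
      return Rsp < lowest;                  // lessUnsigned -> Bad
    }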
*** 628,650 **** --- 619,639 ----
    verify_thread();
    Label skip_compiled_code;

    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
    ld(interp_only, scratch);
-   tst(scratch);
-   br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
+   cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
    delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
    bind(skip_compiled_code);
  }

  // the i2c_adapters need methodOop in G5_method (right? %%%)
  // do the call
#ifdef ASSERT
  {
    Label ok;
-   br_notnull(target, false, Assembler::pt, ok);
-   delayed()->nop();
+   br_notnull_short(target, Assembler::pt, ok);
    stop("null entry point");
    bind(ok);
  }
#endif // ASSERT
*** 980,991 **** --- 969,979 ----
    br(zero, false, pt, unlocked);
    delayed()->nop();

    // Don't unlock anything if the _do_not_unlock_if_synchronized flag
    // is set.
-   tstbool(G1_scratch);
-   br(Assembler::notZero, false, pn, no_unlock);
+   cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
    delayed()->nop();

    // BasicObjectLock will be first in list, since this is a synchronized method. However, need
    // to check that the object has not been unlocked by an explicit monitorexit bytecode.
*** 995,1006 **** --- 983,993 ----
    // pass top-most monitor elem
    add( top_most_monitor(), O1 );

    ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
-   br_notnull(G3_scratch, false, pt, unlock);
-   delayed()->nop();
+   br_notnull_short(G3_scratch, pt, unlock);

    if (throw_monitor_exception) {
      // Entry already unlocked need to throw an exception
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_illegal_monitor_state_exception));
      should_not_reach_here();
*** 1009,1020 **** --- 996,1006 ----
      // If requested, install an illegal_monitor_state_exception.
      // Continue with stack unrolling.
      if (install_monitor_exception) {
        MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
      }
-     ba(false, unlocked);
-     delayed()->nop();
+     ba_short(unlocked);
    }

    bind(unlock);
    unlock_object(O1);
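The two hunks above encode a small policy: a monitor found already unlocked either throws immediately or pends the exception and keeps unwinding. A hedged sketch of that decision, with stub functions standing in for the InterpreterRuntime entry points:

    // Stubs standing in for the InterpreterRuntime calls in the diff.
    void throw_illegal_monitor_state_exception() { /* raises, does not return */ }
    void new_illegal_monitor_state_exception()   { /* pends the exception */ }

    // Mirror of the generator's two flags: throw now, or install and continue.
    void handle_already_unlocked(bool throw_monitor_exception,
                                 bool install_monitor_exception) {
      if (throw_monitor_exception) {
        throw_illegal_monitor_state_exception();  // should_not_reach_here() after this
      } else if (install_monitor_exception) {
        new_illegal_monitor_state_exception();    // then ba_short(unlocked)
      }
    }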
*** 1035,1053 **** --- 1021,1037 ----
#ifdef ASSERT
  add(top_most_monitor(), Rmptr, delta);
  { Label L;
    // ensure that Rmptr starts out above (or at) Rlimit
-   cmp(Rmptr, Rlimit);
-   brx(Assembler::greaterEqualUnsigned, false, pn, L);
-   delayed()->nop();
+   cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
    stop("monitor stack has negative size");
    bind(L);
  }
#endif

  bind(restart);
-   ba(false, entry);
+   ba(entry);
    delayed()->
    add(top_most_monitor(), Rmptr, delta);        // points to current entry, starting with bottom-most entry

  // Entry is still locked, need to throw exception
  bind(exception);
*** 1059,1070 **** --- 1043,1053 ----
    // Unlock does not block, so don't have to worry about the frame
    unlock_object(Rmptr);
    if (install_monitor_exception) {
      MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
    }
-   ba(false, restart);
-   delayed()->nop();
+   ba_short(restart);
  }

  bind(loop);
  cmp(Rtemp, G0);                                 // check if current entry is used
  brx(Assembler::notEqual, false, pn, exception);
*** 1071,1083 **** --- 1054,1064 ----
  delayed()->
  dec(Rmptr, delta);                              // otherwise advance to next entry

#ifdef ASSERT
  { Label L;
    // ensure that Rmptr has not somehow stepped below Rlimit
-   cmp(Rmptr, Rlimit);
-   brx(Assembler::greaterEqualUnsigned, false, pn, L);
-   delayed()->nop();
+   cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
    stop("ran off the end of the monitor stack");
    bind(L);
  }
#endif

  bind(entry);
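Taken together, the loop in the last three hunks scans the monitor region between Rmptr (the bottom-most entry) and Rlimit, unlocking any entry whose object slot is still non-null and restarting. A simplified sketch of that scan, assuming a hypothetical flat entry layout in place of BasicObjectLock:

    #include <cstddef>

    struct MonitorEntry { void* obj; };  // non-null obj == entry still locked

    // Scan from the bottom-most entry toward the limit, as the emitted loop
    // does with dec(Rmptr, delta); return the first still-locked entry, if any.
    MonitorEntry* find_locked(MonitorEntry* bottom_most, MonitorEntry* limit) {
      for (MonitorEntry* m = bottom_most; m >= limit; --m) {
        if (m->obj != nullptr) return m;  // "exception" path: unlock and restart
      }
      return nullptr;                     // clean: fall through past "entry"
    }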
*** 1194,1206 **** --- 1175,1185 ----
    assert(mark_addr.disp() == 0, "cas must take a zero displacement");
    casx_under_lock(mark_addr.base(), mark_reg, temp_reg,
      (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());

    // if the compare and exchange succeeded we are done (we saw an unlocked object)
-   cmp(mark_reg, temp_reg);
-   brx(Assembler::equal, true, Assembler::pt, done);
-   delayed()->nop();
+   cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);

    // We did not see an unlocked object so try the fast recursive case

    // Check if owner is self by comparing the value in the markOop of object
    // with the stack pointer
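The casx_under_lock plus compare above is the classic fast-lock attempt: if the mark word still shows the unlocked pattern, swap in our lock record and we are done. A sketch using std::atomic in place of the SPARC cas, with a made-up mark encoding:

    #include <atomic>
    #include <cstdint>

    const uintptr_t unlocked_mark = 0x1;  // stand-in, not the real mark-word encoding

    // Try the fast path: succeed only if we observed an unlocked object,
    // exactly the condition the emitted cmp/brx tests after the cas.
    bool try_fast_lock(std::atomic<uintptr_t>& mark_addr, uintptr_t lock_record) {
      uintptr_t expected = unlocked_mark;
      return mark_addr.compare_exchange_strong(expected, lock_record);
    }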
*** 1322,1338 **** --- 1301,1311 ----
  // Test ImethodDataPtr.  If it is null, continue at the specified label
  void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
    assert(ProfileInterpreter, "must be profiling interpreter");
- #ifdef _LP64
-   bpr(Assembler::rc_z, false, Assembler::pn, ImethodDataPtr, zero_continue);
- #else
-   tst(ImethodDataPtr);
-   br(Assembler::zero, false, Assembler::pn, zero_continue);
- #endif
-   delayed()->nop();
+   br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
  }

  void InterpreterMacroAssembler::verify_method_data_pointer() {
    assert(ProfileInterpreter, "must be profiling interpreter");
  #ifdef ASSERT
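Before this change, every null test on a 64-bit-only bpr path carried an #ifdef _LP64 split at the call site; the helper can absorb that split once. A sketch with stubbed emits and an assumed structure (the real implementation lives in assembler_sparc.*):

    struct Label { int id; };

    struct SketchAssembler {
      // Stubbed primitives; real encodings live elsewhere.
      void bpr_rc_z(int reg, Label& L) {}  // V9-only: branch if register is zero
      void tst(int reg) {}
      void br_zero(Label& L) {}
      void nop() {}

      void br_null_short(int reg, Label& L) {
    #ifdef _LP64
        bpr_rc_z(reg, L);  // 64-bit: fused register-is-zero branch
    #else
        tst(reg);          // 32-bit: set condition codes, then branch
        br_zero(L);
    #endif
        nop();             // delay slot, now emitted in exactly one place
      }
    };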
*** 1374,1408 **** --- 1347,1368 ----
    // limit or if we call profile_method()
    Label done;

    // if no method data exists, and the counter is high enough, make one
- #ifdef _LP64
-   bpr(Assembler::rc_nz, false, Assembler::pn, ImethodDataPtr, done);
- #else
-   tst(ImethodDataPtr);
-   br(Assembler::notZero, false, Assembler::pn, done);
- #endif
+   br_notnull_short(ImethodDataPtr, Assembler::pn, done);

    // Test to see if we should create a method data oop
    AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
- #ifdef _LP64
-   delayed()->nop();
-   sethi(profile_limit, Rtmp);
- #else
-   delayed()->sethi(profile_limit, Rtmp);
- #endif
+   sethi(profile_limit, Rtmp);
    ld(Rtmp, profile_limit.low10(), Rtmp);
-   cmp(invocation_count, Rtmp);
-   br(Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
-   delayed()->nop();
+   cmp_and_br_short(invocation_count, Rtmp, Assembler::lessUnsigned, Assembler::pn, profile_continue);

    // Build it now.
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
    set_method_data_pointer_for_bcp();
-   ba(false, profile_continue);
-   delayed()->nop();
+   ba_short(profile_continue);
    bind(done);
  }

  // Store a value at some constant offset from the method data pointer.
*** 1630,1646 **** --- 1590,1603 ----
    Label skip_receiver_profile;
    if (receiver_can_be_null) {
      Label not_null;
-     tst(receiver);
-     brx(Assembler::notZero, false, Assembler::pt, not_null);
-     delayed()->nop();
+     br_notnull_short(receiver, Assembler::pt, not_null);
      // We are making a call.  Increment the count for null receiver.
      increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
-     ba(false, skip_receiver_profile);
-     delayed()->nop();
+     ba_short(skip_receiver_profile);
      bind(not_null);
    }

    // Record the receiver type.
    record_klass_in_profile(receiver, scratch, true);
*** 1680,1691 **** --- 1637,1647 ----
        // delayed()->tst(scratch);

        // The receiver is receiver[n].  Increment count[n].
        int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
        increment_mdp_data_at(count_offset, scratch);
-       ba(false, done);
-       delayed()->nop();
+       ba_short(done);
        bind(next_test);

        if (test_for_null_also) {
          Label found_null;
          // Failed the equality check on receiver[n]...  Test for null.
*** 1695,1706 **** --- 1651,1661 ----
            brx(Assembler::zero, false, Assembler::pn, found_null);
            delayed()->nop();
            // Receiver did not match any saved receiver and there is no empty row for it.
            // Increment total counter to indicate polymorphic case.
            increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
-           ba(false, done);
-           delayed()->nop();
+           ba_short(done);
            bind(found_null);
          } else {
            brx(Assembler::notZero, false, Assembler::pt, done);
            delayed()->nop();
          }
*** 1727,1738 **** --- 1682,1692 ----
      set_mdp_data_at(recvr_offset, receiver);
      int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
      mov(DataLayout::counter_increment, scratch);
      set_mdp_data_at(count_offset, scratch);
      if (start_row > 0) {
-       ba(false, done);
-       delayed()->nop();
+       ba_short(done);
      }
  }

  void InterpreterMacroAssembler::record_klass_in_profile(Register receiver,
                                                          Register scratch, bool is_virtual_call) {
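The receiver rows updated in the last few hunks implement a small inline cache in the methodData: match an existing row, claim an empty one, or bump the polymorphic counter. A simplified C++ sketch of that logic, with hypothetical flat structures standing in for the mdp layout:

    #include <cstdint>
    #include <cstddef>

    struct ReceiverRow { const void* klass; uint64_t count; };

    // Mirror of the emitted compare/branch chain in record_klass_in_profile.
    void profile_receiver(ReceiverRow* rows, size_t nrows,
                          const void* receiver_klass, uint64_t& polymorphic_count) {
      // First, look for an existing row for this klass: receiver[n] == klass?
      for (size_t row = 0; row < nrows; row++) {
        if (rows[row].klass == receiver_klass) {
          rows[row].count++;                  // increment count[n]; ba_short(done)
          return;
        }
      }
      // Otherwise claim the first empty row, if any (the found_null path).
      for (size_t row = 0; row < nrows; row++) {
        if (rows[row].klass == nullptr) {
          rows[row].klass = receiver_klass;   // set_mdp_data_at(recvr_offset, ...)
          rows[row].count = 1;                // DataLayout::counter_increment
          return;
        }
      }
      polymorphic_count++;                    // no match and no empty row
    }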
*** 1770,1781 **** --- 1724,1734 ----
        // return_bci is equal to bci[n].  Increment the count.
        increment_mdp_data_at(in_bytes(RetData::bci_count_offset(row)), scratch);

        // The method data pointer needs to be updated to reflect the new target.
        update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
-       ba(false, profile_continue);
-       delayed()->nop();
+       ba_short(profile_continue);
      bind(next_test);
    }

    update_mdp_for_ret(state, return_bci);
*** 1920,1931 **** --- 1873,1884 ----
    Label start_copying, next;
    // untested("monitor stack expansion");
    compute_stack_base(Rtemp);
-   ba( false, start_copying );
-   delayed()->cmp( Rtemp, Rlimit); // done? duplicated below
+   ba(start_copying);
+   delayed()->cmp(Rtemp, Rlimit); // done? duplicated below

    // note: must copy from low memory upwards
    // On entry to loop,
    // Rtemp points to new base of stack, Lesp points to new end of stack (1 past TOS)
    // Loop mutates Rtemp
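Note the one case where the short form is not wanted: this branch's delay slot already carries useful work (the loop-test cmp), so the change merely switches to the one-argument ba() and keeps the explicit delayed()->cmp(...). A toy illustration of the trade-off, using an assumed emitter like the earlier sketch (on SPARC, the instruction after a branch executes in its delay slot):

    #include <cstdio>

    struct Label { int id; };

    struct ToyAssembler {
      void ba(Label& L)                      { std::printf("  ba   L%d\n", L.id); }
      void cmp(const char* a, const char* b) { std::printf("  cmp  %s, %s\n", a, b); }
      void nop()                             { std::printf("  nop\n"); }

      // Short form: convenient, but it burns the delay slot on a nop.
      void ba_short(Label& L) { ba(L); nop(); }
    };

    int main() {
      ToyAssembler a;
      Label start_copying = {1};
      a.ba(start_copying);
      a.cmp("%l0", "%l1");  // delay slot does the loop test "for free"
      return 0;
    }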
*** 2008,2020 **** --- 1961,1971 ----
    assert(Rscratch1 != Rscratch, "Registers cannot be same");
    // untested("reg area corruption");
    add(Rindex, offset, Rscratch);
    add(Rlimit, 64 + STACK_BIAS, Rscratch1);
-   cmp(Rscratch, Rscratch1);
-   brx(Assembler::greaterEqualUnsigned, false, pn, L);
-   delayed()->nop();
+   cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
    stop("regsave area is being clobbered");
    bind(L);
  }
#endif // ASSERT
*** 2172,2184 **** --- 2123,2133 ----
    assert_different_registers(backedge_count, Rtmp, branch_bcp);
    assert(UseOnStackReplacement,"Must UseOnStackReplacement to test_backedge_count_for_osr");

    AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
    load_contents(limit, Rtmp);
-   cmp(backedge_count, Rtmp);
-   br(Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow);
-   delayed()->nop();
+   cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);

    // When ProfileInterpreter is on, the backedge_count comes from the
    // methodDataOop, which value does not get reset on the call to
    // frequency_counter_overflow().  To avoid excessive calls to the overflow
    // routine while the method is being compiled, add a second test to make sure
*** 2194,2212 **** --- 2143,2157 ----
    set(6,Rtmp);
    call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), branch_bcp, Rtmp);

    // Was an OSR adapter generated?
    // O0 = osr nmethod
-   tst(O0);
-   brx(Assembler::zero, false, Assembler::pn, overflow_with_error);
-   delayed()->nop();
+   br_null_short(O0, Assembler::pn, overflow_with_error);

    // Has the nmethod been invalidated already?
    ld(O0, nmethod::entry_bci_offset(), O2);
-   cmp(O2, InvalidOSREntryBci);
-   br(Assembler::equal, false, Assembler::pn, overflow_with_error);
-   delayed()->nop();
+   cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);

    // migrate the interpreter frame off of the stack

    mov(G2_thread, L7);
    // save nmethod
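The two checks above decide whether the OSR migration can proceed. In outline, assuming InvalidOSREntryBci is the sentinel an invalidated OSR nmethod carries (hypothetical types and value below):

    // Hypothetical stand-ins for the nmethod fields the emitted code loads.
    const int InvalidOSREntryBci = -2;  // assumed sentinel value

    struct OsrNmethod { int entry_bci; };

    // Null result or an invalidated nmethod -> overflow_with_error path;
    // otherwise the caller migrates the frame and jumps to the OSR entry.
    const OsrNmethod* check_osr_result(const OsrNmethod* o0) {
      if (o0 == nullptr) return nullptr;                       // no adapter generated
      if (o0->entry_bci == InvalidOSREntryBci) return nullptr; // invalidated already
      return o0;
    }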
*** 2268,2279 **** --- 2213,2223 ----
    // See if it is an address (in the current method):

    mov(reg, Rtmp);
    const int log2_bytecode_size_limit = 16;
    srl(Rtmp, log2_bytecode_size_limit, Rtmp);
-   br_notnull( Rtmp, false, pt, test );
-   delayed()->nop();
+   br_notnull_short( Rtmp, pt, test );

    // %%% should use call_VM_leaf here?
    save_frame_and_mov(0, Lmethod, O0, reg, O1);
    save_thread(L7_thread_cache);
    call(CAST_FROM_FN_PTR(address,verify_return_address), relocInfo::none);
*** 2318,2330 **** --- 2262,2272 ----
    if (JvmtiExport::can_post_interpreter_events()) {
      Label L;
      Register temp_reg = O5;
      const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
      ld(interp_only, temp_reg);
-     tst(temp_reg);
-     br(zero, false, pt, L);
-     delayed()->nop();
+     cmp_and_br_short(temp_reg, 0, equal, pt, L);
      call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
      bind(L);
    }

    {
*** 2370,2382 **** --- 2312,2322 ----
    if (mode == NotifyJVMTI && JvmtiExport::can_post_interpreter_events()) {
      Label L;
      Register temp_reg = O5;
      const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
      ld(interp_only, temp_reg);
-     tst(temp_reg);
-     br(zero, false, pt, L);
-     delayed()->nop();
+     cmp_and_br_short(temp_reg, 0, equal, pt, L);

      // Note: frame::interpreter_frame_result has a dependency on how the
      // method result is saved across the call to post_method_exit.  For
      // native methods it assumes the result registers are saved to
      // l_scratch and d_scratch.  If this changes then the interpreter_frame_result
