--- old/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Jul 15 19:09:45 2011
+++ new/src/cpu/sparc/vm/interp_masm_sparc.cpp	Fri Jul 15 19:09:45 2011
@@ -236,13 +236,13 @@
     Label L;
     Register thr_state = G3_scratch;
     ld_ptr(G2_thread, JavaThread::jvmti_thread_state_offset(), thr_state);
-    br_null(thr_state, false, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
+    br_null_short(thr_state, pt, L); // if (thread->jvmti_thread_state() == NULL) exit;
 
     // Initiate earlyret handling only if it is not already being processed.
     // If the flag has the earlyret_processing bit set, it means that this code
     // is called *during* earlyret handling - we don't want to reenter.
     ld(thr_state, JvmtiThreadState::earlyret_state_offset(), G4_scratch);
-    cmp_and_br(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, false, pt, L);
+    cmp_and_br_short(G4_scratch, JvmtiThreadState::earlyret_pending, Assembler::notEqual, pt, L);
 
     // Call Interpreter::remove_activation_early_entry() to get the address of the
     // same-named entrypoint in the generated interpreter code
@@ -562,7 +562,7 @@
 #ifdef _LP64
   sub(Rtemp, STACK_BIAS, Rtemp);  // Bias Rtemp before cmp to FP
 #endif
-  cmp_and_brx(Rtemp, FP, Assembler::greaterUnsigned, false, Assembler::pn, Bad);
+  cmp_and_brx_short(Rtemp, FP, Assembler::greaterUnsigned, Assembler::pn, Bad);
 
   // Saved SP must not be ridiculously below current SP.
   size_t maxstack = MAX2(JavaThread::stack_size_at_create(), (size_t) 4*K*K);
@@ -571,9 +571,9 @@
 #ifdef _LP64
   add(Rtemp, STACK_BIAS, Rtemp);  // Unbias Rtemp before cmp to Rsp
 #endif
-  cmp_and_brx(Rsp, Rtemp, Assembler::lessUnsigned, false, Assembler::pn, Bad);
+  cmp_and_brx_short(Rsp, Rtemp, Assembler::lessUnsigned, Assembler::pn, Bad);
 
-  ba(OK);
+  ba_short(OK);
 
   bind(Bad);
   stop("on return to interpreted call, restored SP is corrupted");
@@ -621,8 +621,7 @@
 
     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
     ld(interp_only, scratch);
-    tst(scratch);
-    br(Assembler::notZero, true, Assembler::pn, skip_compiled_code);
+    cmp_zero_and_br(Assembler::notZero, scratch, skip_compiled_code, true, Assembler::pn);
     delayed()->ld_ptr(G5_method, in_bytes(methodOopDesc::interpreter_entry_offset()), target);
     bind(skip_compiled_code);
   }
@@ -632,7 +631,7 @@
 #ifdef ASSERT
   {
     Label ok;
-    br_notnull(target, false, Assembler::pt, ok);
+    br_notnull_short(target, Assembler::pt, ok);
     stop("null entry point");
     bind(ok);
   }
@@ -972,8 +971,7 @@
 
   // Don't unlock anything if the _do_not_unlock_if_synchronized flag
   // is set.
-  tst(G1_scratch);
-  br(Assembler::notZero, false, pn, no_unlock);
+  cmp_zero_and_br(Assembler::notZero, G1_scratch, no_unlock);
   delayed()->nop();
 
   // BasicObjectLock will be first in list, since this is a synchronized method. However, need
@@ -987,7 +985,7 @@
   add( top_most_monitor(), O1 );
 
   ld_ptr(O1, BasicObjectLock::obj_offset_in_bytes(), G3_scratch);
-  br_notnull(G3_scratch, false, pt, unlock);
+  br_notnull_short(G3_scratch, pt, unlock);
 
   if (throw_monitor_exception) {
     // Entry already unlocked need to throw an exception
@@ -1000,7 +998,7 @@
     if (install_monitor_exception) {
       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
     }
-    ba(unlocked);
+    ba_short(unlocked);
   }
 
   bind(unlock);
@@ -1025,13 +1023,13 @@
   add(top_most_monitor(), Rmptr, delta);
   { Label L;
     // ensure that Rmptr starts out above (or at) Rlimit
-    cmp_and_brx(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, false, pn, L);
+    cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
     stop("monitor stack has negative size");
     bind(L);
   }
 #endif
 
   bind(restart);
-  ba(entry, false);
+  ba(entry);
   delayed()->
   add(top_most_monitor(), Rmptr, delta);      // points to current entry, starting with bottom-most entry
@@ -1047,7 +1045,7 @@
     if (install_monitor_exception) {
       MacroAssembler::call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::new_illegal_monitor_state_exception));
     }
-    ba(restart);
+    ba_short(restart);
   }
 
   bind(loop);
@@ -1058,7 +1056,7 @@
 #ifdef ASSERT
   { Label L;
     // ensure that Rmptr has not somehow stepped below Rlimit
-    cmp_and_brx(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, false, pn, L);
+    cmp_and_brx_short(Rmptr, Rlimit, Assembler::greaterEqualUnsigned, pn, L);
     stop("ran off the end of the monitor stack");
     bind(L);
   }
@@ -1179,7 +1177,7 @@
            (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr());
 
   // if the compare and exchange succeeded we are done (we saw an unlocked object)
-  cmp_and_brx(mark_reg, temp_reg, Assembler::equal, true, Assembler::pt, done);
+  cmp_and_brx_short(mark_reg, temp_reg, Assembler::equal, Assembler::pt, done);
 
   // We did not see an unlocked object so try the fast recursive case
 
@@ -1305,7 +1303,7 @@
 
 void InterpreterMacroAssembler::test_method_data_pointer(Label& zero_continue) {
   assert(ProfileInterpreter, "must be profiling interpreter");
-  br_null(ImethodDataPtr, false, Assembler::pn, zero_continue);
+  br_null_short(ImethodDataPtr, Assembler::pn, zero_continue);
 }
 
 void InterpreterMacroAssembler::verify_method_data_pointer() {
@@ -1351,18 +1349,18 @@
     Label done;
 
     // if no method data exists, and the counter is high enough, make one
-    br_notnull(ImethodDataPtr, false, Assembler::pn, done);
+    br_notnull_short(ImethodDataPtr, Assembler::pn, done);
 
     // Test to see if we should create a method data oop
     AddressLiteral profile_limit((address) &InvocationCounter::InterpreterProfileLimit);
     sethi(profile_limit, Rtmp);
     ld(Rtmp, profile_limit.low10(), Rtmp);
-    cmp_and_br(invocation_count, Rtmp, Assembler::lessUnsigned, false, Assembler::pn, profile_continue);
+    cmp_and_br_short(invocation_count, Rtmp, Assembler::lessUnsigned, Assembler::pn, profile_continue);
 
     // Build it now.
     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method));
     set_method_data_pointer_for_bcp();
-    ba(profile_continue);
+    ba_short(profile_continue);
 
     bind(done);
   }
@@ -1594,10 +1592,10 @@
     Label skip_receiver_profile;
     if (receiver_can_be_null) {
       Label not_null;
-      br_notnull(receiver, false, Assembler::pt, not_null);
+      br_notnull_short(receiver, Assembler::pt, not_null);
       // We are making a call.  Increment the count for null receiver.
       increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
-      ba(skip_receiver_profile);
+      ba_short(skip_receiver_profile);
       bind(not_null);
     }
 
@@ -1641,7 +1639,7 @@
       // The receiver is receiver[n].  Increment count[n].
       int count_offset = in_bytes(VirtualCallData::receiver_count_offset(row));
       increment_mdp_data_at(count_offset, scratch);
-      ba(done);
+      ba_short(done);
       bind(next_test);
 
       if (test_for_null_also) {
@@ -1655,7 +1653,7 @@
           // Receiver did not match any saved receiver and there is no empty row for it.
           // Increment total counter to indicate polymorphic case.
           increment_mdp_data_at(in_bytes(CounterData::count_offset()), scratch);
-          ba(done);
+          ba_short(done);
           bind(found_null);
         } else {
           brx(Assembler::notZero, false, Assembler::pt, done);
@@ -1686,7 +1684,7 @@
     mov(DataLayout::counter_increment, scratch);
     set_mdp_data_at(count_offset, scratch);
     if (start_row > 0) {
-      ba(done);
+      ba_short(done);
     }
   }
 
@@ -1728,7 +1726,7 @@
 
       // The method data pointer needs to be updated to reflect the new target.
       update_mdp_by_offset(in_bytes(RetData::bci_displacement_offset(row)), scratch);
-      ba(profile_continue);
+      ba_short(profile_continue);
       bind(next_test);
     }
@@ -1877,7 +1875,7 @@
   // untested("monitor stack expansion");
   compute_stack_base(Rtemp);
-  ba(start_copying, false);
+  ba(start_copying);
   delayed()->cmp(Rtemp, Rlimit); // done? duplicated below
 
   // note: must copy from low memory upwards
@@ -1965,7 +1963,7 @@
     // untested("reg area corruption");
     add(Rindex, offset, Rscratch);
     add(Rlimit, 64 + STACK_BIAS, Rscratch1);
-    cmp_and_brx(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, false, pn, L);
+    cmp_and_brx_short(Rscratch, Rscratch1, Assembler::greaterEqualUnsigned, pn, L);
     stop("regsave area is being clobbered");
     bind(L);
   }
@@ -2127,7 +2125,7 @@
 
     AddressLiteral limit(&InvocationCounter::InterpreterBackwardBranchLimit);
     load_contents(limit, Rtmp);
-    cmp_and_br(backedge_count, Rtmp, Assembler::lessUnsigned, false, Assembler::pt, did_not_overflow);
+    cmp_and_br_short(backedge_count, Rtmp, Assembler::lessUnsigned, Assembler::pt, did_not_overflow);
 
     // When ProfileInterpreter is on, the backedge_count comes from the
     // methodDataOop, which value does not get reset on the call to
@@ -2147,11 +2145,11 @@
 
       // Was an OSR adapter generated?
      // O0 = osr nmethod
-      br_null(O0, false, Assembler::pn, overflow_with_error);
+      br_null_short(O0, Assembler::pn, overflow_with_error);
 
       // Has the nmethod been invalidated already?
       ld(O0, nmethod::entry_bci_offset(), O2);
-      cmp_and_br(O2, InvalidOSREntryBci, Assembler::equal, false, Assembler::pn, overflow_with_error);
+      cmp_and_br_short(O2, InvalidOSREntryBci, Assembler::equal, Assembler::pn, overflow_with_error);
 
       // migrate the interpreter frame off of the stack
@@ -2217,7 +2215,7 @@
   mov(reg, Rtmp);
   const int log2_bytecode_size_limit = 16;
   srl(Rtmp, log2_bytecode_size_limit, Rtmp);
-  br_notnull( Rtmp, false, pt, test );
+  br_notnull_short( Rtmp, pt, test );
 
   // %%% should use call_VM_leaf here?
   save_frame_and_mov(0, Lmethod, O0, reg, O1);
@@ -2266,7 +2264,7 @@
     Register temp_reg = O5;
     const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
     ld(interp_only, temp_reg);
-    br_zero(temp_reg, L);
+    cmp_and_br_short(temp_reg, 0, equal, pt, L);
     call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_entry));
     bind(L);
   }
@@ -2316,7 +2314,7 @@
     Register temp_reg = O5;
    const Address interp_only(G2_thread, JavaThread::interp_only_mode_offset());
     ld(interp_only, temp_reg);
-    br_zero(temp_reg, L);
+    cmp_and_br_short(temp_reg, 0, equal, pt, L);
 
     // Note: frame::interpreter_frame_result has a dependency on how the
     // method result is saved across the call to post_method_exit.  For
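
Note on the conversion pattern (a reviewer's aside, not part of the patch): every change above replaces a multi-instruction idiom -- a cmp/tst, a conditional branch with an explicit annul flag, and a manually filled delay slot -- with a single *_short helper call. Below is a minimal sketch of what such a helper is assumed to do, inferred only from the call sites in this diff; VM_Version::has_cbcond() and the cbcond(c, icc, ...) encoding used here are placeholders, not the actual HotSpot implementation:

  // Sketch: compare two registers and branch to L if condition c holds.
  // Only legal when L will be bound within short-branch range, which is
  // what lets a single compare-and-branch instruction be used on
  // processors that have one (e.g. SPARC T4 cbcond).
  void MacroAssembler::cmp_and_br_short(Register s1, Register s2,
                                        Condition c, Predict p, Label& L) {
    if (VM_Version::has_cbcond()) {   // assumed feature test
      cbcond(c, icc, s1, s2, L);      // one instruction, no delay slot
    } else {
      cmp(s1, s2);                    // classic three-instruction sequence
      br(c, false, p, L);
      delayed()->nop();               // helper fills the delay slot itself
    }
  }

Because the helper owns the delay slot, call sites that still want a useful delayed() instruction keep the long form; that is why ba(entry) followed by delayed()->add(...), and cmp_zero_and_br(...) followed by delayed()->ld_ptr(...) or delayed()->nop(), are deliberately not converted to *_short variants in this patch.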