--- old/src/cpu/sparc/vm/assembler_sparc.cpp Fri Jul 15 19:09:40 2011 +++ new/src/cpu/sparc/vm/assembler_sparc.cpp Fri Jul 15 19:09:40 2011 @@ -106,7 +106,7 @@ case bp_op2: s = "bp"; break; case cb_op2: s = "cb"; break; case bpr_op2: { - if (is_cbc(inst)) { + if (is_cbcond(inst)) { s = is_cxb(inst) ? "cxb" : "cwb"; } else { s = "bpr"; @@ -140,7 +140,7 @@ case br_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break; case cb_op2: m = wdisp( word_aligned_ones, 0, 22); v = wdisp( dest_pos, inst_pos, 22); break; case bpr_op2: { - if (is_cbc(inst)) { + if (is_cbcond(inst)) { m = wdisp10(word_aligned_ones, 0); v = wdisp10(dest_pos, inst_pos); } else { @@ -171,7 +171,7 @@ case br_op2: r = inv_wdisp( inst, pos, 22); break; case cb_op2: r = inv_wdisp( inst, pos, 22); break; case bpr_op2: { - if (is_cbc(inst)) { + if (is_cbcond(inst)) { r = inv_wdisp10(inst, pos); } else { r = inv_wdisp16(inst, pos); @@ -991,7 +991,7 @@ Label PcOk; save_frame(0); // to avoid clobbering O0 ld_ptr(pc_addr, L0); - br_null(L0, false, Assembler::pt, PcOk); + br_null_short(L0, Assembler::pt, PcOk); stop("last_Java_pc not zeroed before leaving Java"); bind(PcOk); @@ -1116,7 +1116,7 @@ Address exception_addr(G2_thread, Thread::pending_exception_offset()); ld_ptr(exception_addr, scratch_reg); - br_null(scratch_reg,false,pt,L); + br_null_short(scratch_reg, pt, L); // we use O7 linkage so that forward_exception_entry has the issuing PC call(StubRoutines::forward_exception_entry(), relocInfo::runtime_call_type); delayed()->nop(); @@ -1890,11 +1890,11 @@ // assert((obj & oop_mask) == oop_bits); and3(O0_obj, O2_mask, O4_temp); - cmp_and_brx(O4_temp, O3_bits, notEqual, false, pn, null_or_fail); + cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, null_or_fail); if ((NULL_WORD & Universe::verify_oop_mask()) == Universe::verify_oop_bits()) { // the null_or_fail case is useless; must test for null separately - br_null(O0_obj, false, pn, succeed); + br_null_short(O0_obj, pn, succeed); } // Check the klassOop of this object for being in the right area of memory. @@ -1906,7 +1906,7 @@ if( Universe::verify_klass_bits() != Universe::verify_oop_bits() ) set(Universe::verify_klass_bits(), O3_bits); and3(O0_obj, O2_mask, O4_temp); - cmp_and_brx(O4_temp, O3_bits, notEqual, false, pn, fail); + cmp_and_brx_short(O4_temp, O3_bits, notEqual, pn, fail); // Check the klass's klass load_klass(O0_obj, O0_obj); and3(O0_obj, O2_mask, O4_temp); @@ -2134,15 +2134,9 @@ } // compares (32 bit) register with zero and branches. NOT FOR USE WITH 64-bit POINTERS -void MacroAssembler::br_zero(Register s1, Label& L) { - assert_not_delayed(); - if (use_cbc(L)) { - Assembler::cbc(zero, icc, s1, 0, L); - } else { - tst(s1); - br (zero, false, pt, L); - delayed()->nop(); - } +void MacroAssembler::cmp_zero_and_br(Condition c, Register s1, Label& L, bool a, Predict p) { + tst(s1); + br (c, a, p, L); } // Compares a pointer register with zero and branches on null. @@ -2228,43 +2222,90 @@ } } -void MacroAssembler::cmp_and_br(Register s1, int simm13a, Condition c, - bool a, Predict p, Label& L) { +// Compare integer (32 bit) values (icc only). +void MacroAssembler::cmp_and_br_short(Register s1, Register s2, Condition c, + Predict p, Label& L) { assert_not_delayed(); - if (is_simm(simm13a,5) && use_cbc(L)) { - Assembler::cbc(c, icc, s1, simm13a, L); + if (use_cbcond(L)) { + Assembler::cbcond(c, icc, s1, s2, L); } else { + cmp(s1, s2); + br(c, false, p, L); + delayed()->nop(); + } +} + +// Compare integer (32 bit) values (icc only). 
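A note on the short-branch machinery introduced above: use_cbcond(L) decides whether the T4 compare-and-branch instruction is usable at all, and the immediate overloads additionally guard with is_simm(simm13a, 5) because cbcond's constant operand field is only 5 bits wide; its branch displacement is a signed 10-bit word offset. The following standalone sketch models those two range checks — the helper names and exact limits are illustrative assumptions drawn from the guards in the patch, not the HotSpot definitions:

    #include <stdint.h>
    #include <stdio.h>

    // Illustrative stand-in for Assembler::is_simm: does 'value' fit in a
    // signed immediate field of 'nbits' bits?
    static bool is_simm(int64_t value, int nbits) {
      int64_t limit = int64_t(1) << (nbits - 1);
      return -limit <= value && value < limit;
    }

    // cbcond carries a signed 10-bit word displacement, so a target is only
    // reachable within roughly +/-2KB of the branch instruction itself.
    static bool cbcond_reaches(intptr_t branch_pc, intptr_t target_pc) {
      return is_simm((target_pc - branch_pc) >> 2, 10);
    }

    // The immediate form also restricts the compared constant to a signed
    // 5-bit value, mirroring is_simm(simm13a, 5) in the code above.
    static bool cbcond_usable(intptr_t branch_pc, intptr_t target_pc, int imm) {
      return is_simm(imm, 5) && cbcond_reaches(branch_pc, target_pc);
    }

    int main() {
      printf("%d\n", cbcond_usable(0x1000, 0x13fc, 3)); // in range  -> 1
      printf("%d\n", cbcond_usable(0x1000, 0x9000, 3)); // too far   -> 0
      return 0;
    }

When either check fails, the macros fall back to the classic cmp + branch + delayed()->nop() sequence, which is why the _short variants drop the annul parameter: the fallback always uses a non-annulled branch with a nop in the delay slot.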
+void MacroAssembler::cmp_and_br_short(Register s1, int simm13a, Condition c, + Predict p, Label& L) { + assert_not_delayed(); + if (is_simm(simm13a,5) && use_cbcond(L)) { + Assembler::cbcond(c, icc, s1, simm13a, L); + } else { cmp(s1, simm13a); - br(c, a, p, L); + br(c, false, p, L); delayed()->nop(); } } // Branch that tests xcc in LP64 and icc in !LP64 -void MacroAssembler::cmp_and_brx(Register s1, Register s2, Condition c, - bool a, Predict p, Label& L) { +void MacroAssembler::cmp_and_brx_short(Register s1, Register s2, Condition c, + Predict p, Label& L) { assert_not_delayed(); - if (use_cbc(L)) { - Assembler::cbc(c, ptr_cc, s1, s2, L); + if (use_cbcond(L)) { + Assembler::cbcond(c, ptr_cc, s1, s2, L); } else { cmp(s1, s2); - brx(c, a, p, L); + brx(c, false, p, L); delayed()->nop(); } } -void MacroAssembler::cmp_and_brx(Register s1, int simm13a, Condition c, - bool a, Predict p, Label& L) { +// Branch that tests xcc in LP64 and icc in !LP64 +void MacroAssembler::cmp_and_brx_short(Register s1, int simm13a, Condition c, + Predict p, Label& L) { assert_not_delayed(); - if (is_simm(simm13a,5) && use_cbc(L)) { - Assembler::cbc(c, ptr_cc, s1, simm13a, L); + if (is_simm(simm13a,5) && use_cbcond(L)) { + Assembler::cbcond(c, ptr_cc, s1, simm13a, L); } else { cmp(s1, simm13a); - brx(c, a, p, L); + brx(c, false, p, L); delayed()->nop(); } } +// Short branch version for compares a pointer with zero. + +void MacroAssembler::br_null_short(Register s1, Predict p, Label& L) { + assert_not_delayed(); + if (use_cbcond(L)) { + Assembler::cbcond(zero, ptr_cc, s1, 0, L); + return; + } + br_null(s1, false, p, L); + delayed()->nop(); +} + +void MacroAssembler::br_notnull_short(Register s1, Predict p, Label& L) { + assert_not_delayed(); + if (use_cbcond(L)) { + Assembler::cbcond(notZero, ptr_cc, s1, 0, L); + return; + } + br_notnull(s1, false, p, L); + delayed()->nop(); +} + +// Unconditional short branch +void MacroAssembler::ba_short(Label& L) { + if (use_cbcond(L)) { + Assembler::cbcond(equal, icc, G0, G0, L); + return; + } + br(always, false, pt, L); + delayed()->nop(); +} + // instruction sequences factored across compiler & interpreter @@ -2290,7 +2331,7 @@ if (VM_Version::v9_instructions_work()) { mov(-1, Rresult); - ba(done, false); delayed()-> movcc(greater, false, icc, 1, Rresult); + ba(done); delayed()-> movcc(greater, false, icc, 1, Rresult); } else { br(less, true, pt, done); delayed()-> set(-1, Rresult); br(greater, true, pt, done); delayed()-> set( 1, Rresult); @@ -2365,7 +2406,7 @@ sll(Rin_low, Rcount, Rout_low); // low half } srl(Rxfer_bits, 1, Rxfer_bits ); // shift right by one more - ba(done, false); + ba(done); delayed()->or3(Rout_high, Rxfer_bits, Rout_high); // new hi value: or in shifted old hi part and xfer from low // shift >= 32 bits, Ralt_count = Rcount-32 @@ -2426,7 +2467,7 @@ if (Rcount == Rout_low) { srl(Rin_low, Rcount, Rout_low); } - ba(done, false); + ba(done); delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high // shift >= 32 bits, Ralt_count = Rcount-32 @@ -2489,7 +2530,7 @@ if (Rcount == Rout_low) { srl(Rin_low, Rcount, Rout_low); } - ba(done, false); + ba(done); delayed()->or3(Rout_low, Rxfer_bits, Rout_low); // new low value: or shifted old low part and xfer from high // shift >= 32 bits, Ralt_count = Rcount-32 @@ -2753,7 +2794,7 @@ set(StubRoutines::Sparc::locked, lock_reg); bind(retry_get_lock); - cmp_and_br(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, false, Assembler::pt, dont_yield); + 
cmp_and_br_short(yield_reg, V8AtomicOperationUnderLockSpinCount, Assembler::less, Assembler::pt, dont_yield); if(use_call_vm) { Untested("Need to verify global reg consistancy"); @@ -2783,7 +2824,7 @@ // yes, got lock. do we have the same top? ld(top_ptr_reg_after_save, 0, value_reg); - cmp_and_br(value_reg, top_reg_after_save, Assembler::notEqual, false, Assembler::pt, not_same); + cmp_and_br_short(value_reg, top_reg_after_save, Assembler::notEqual, Assembler::pn, not_same); // yes, same top. st(ptr_reg_after_save, top_ptr_reg_after_save, 0); @@ -3033,7 +3074,7 @@ // on success: restore(); - ba(L_success); + ba_short(L_success); // on failure: bind(L_pop_to_failure); @@ -3112,8 +3153,7 @@ // Hacked ba(), which may only be used just before L_fallthrough. #define FINAL_JUMP(label) \ if (&(label) != &L_fallthrough) { \ - ba(label, false); \ - delayed()->nop(); \ + ba(label); delayed()->nop(); \ } if (super_check_offset.is_register()) { @@ -3245,7 +3285,7 @@ st_ptr(super_klass, sub_klass, sc_offset); if (L_success != &L_fallthrough) { - ba(*L_success, false); + ba(*L_success); delayed()->nop(); } @@ -3260,7 +3300,7 @@ // compare method type against that of the receiver RegisterOrConstant mhtype_offset = delayed_value(java_lang_invoke_MethodHandle::type_offset_in_bytes, temp_reg); load_heap_oop(mh_reg, mhtype_offset, temp_reg); - cmp_and_brx(temp_reg, mtype_reg, Assembler::notEqual, false, Assembler::pn, wrong_method_type); + cmp_and_brx_short(temp_reg, mtype_reg, Assembler::notEqual, Assembler::pn, wrong_method_type); } @@ -3353,7 +3393,7 @@ // pointers to allow age to be placed into low bits assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout"); and3(mark_reg, markOopDesc::biased_lock_mask_in_place, temp_reg); - cmp_and_brx(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, false, Assembler::pn, cas_label); + cmp_and_brx_short(temp_reg, markOopDesc::biased_lock_pattern, Assembler::notEqual, Assembler::pn, cas_label); load_klass(obj_reg, temp_reg); ld_ptr(Address(temp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()), temp_reg); @@ -3420,7 +3460,7 @@ brx(Assembler::notEqual, true, Assembler::pn, *slow_case); delayed()->nop(); } - ba(done); + ba_short(done); bind(try_rebias); // At this point we know the epoch has expired, meaning that the @@ -3448,7 +3488,7 @@ brx(Assembler::notEqual, true, Assembler::pn, *slow_case); delayed()->nop(); } - ba(done); + ba_short(done); bind(try_revoke_bias); // The prototype mark in the klass doesn't have the bias bit set any @@ -3499,7 +3539,7 @@ // Solaris/SPARC's "as". 
Another apt name would be cas_ptr() void MacroAssembler::casn (Register addr_reg, Register cmp_reg, Register set_reg ) { - casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()) ; + casx_under_lock (addr_reg, cmp_reg, set_reg, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); } @@ -3540,9 +3580,9 @@ } if (EmitSync & 1) { - mov (3, Rscratch) ; - st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); - cmp (SP, G0) ; + mov(3, Rscratch); + st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); + cmp(SP, G0); return ; } @@ -3583,7 +3623,7 @@ assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); andcc(Rscratch, 0xfffff003, Rscratch); st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); - bind (done) ; + bind (done); return ; } @@ -3592,7 +3632,7 @@ if (EmitSync & 256) { Label IsInflated ; - ld_ptr (mark_addr, Rmark); // fetch obj->mark + ld_ptr(mark_addr, Rmark); // fetch obj->mark // Triage: biased, stack-locked, neutral, inflated if (try_bias) { biased_locking_enter(Roop, Rmark, Rscratch, done, NULL, counters); @@ -3603,49 +3643,49 @@ // Store mark into displaced mark field in the on-stack basic-lock "box" // Critically, this must happen before the CAS // Maximize the ST-CAS distance to minimize the ST-before-CAS penalty. - st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); - andcc (Rmark, 2, G0) ; - brx (Assembler::notZero, false, Assembler::pn, IsInflated) ; - delayed() -> + st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); + andcc(Rmark, 2, G0); + brx(Assembler::notZero, false, Assembler::pn, IsInflated); + delayed()-> // Try stack-lock acquisition. // Beware: the 1st instruction is in a delay slot - mov (Rbox, Rscratch); - or3 (Rmark, markOopDesc::unlocked_value, Rmark); - assert (mark_addr.disp() == 0, "cas must take a zero displacement"); - casn (mark_addr.base(), Rmark, Rscratch) ; - cmp (Rmark, Rscratch); - brx (Assembler::equal, false, Assembler::pt, done); + mov(Rbox, Rscratch); + or3(Rmark, markOopDesc::unlocked_value, Rmark); + assert(mark_addr.disp() == 0, "cas must take a zero displacement"); + casn(mark_addr.base(), Rmark, Rscratch); + cmp(Rmark, Rscratch); + brx(Assembler::equal, false, Assembler::pt, done); delayed()->sub(Rscratch, SP, Rscratch); // Stack-lock attempt failed - check for recursive stack-lock. // See the comments below about how we might remove this case. #ifdef _LP64 - sub (Rscratch, STACK_BIAS, Rscratch); + sub(Rscratch, STACK_BIAS, Rscratch); #endif assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); - andcc (Rscratch, 0xfffff003, Rscratch); - br (Assembler::always, false, Assembler::pt, done) ; - delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); + andcc(Rscratch, 0xfffff003, Rscratch); + br(Assembler::always, false, Assembler::pt, done); + delayed()-> st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); - bind (IsInflated) ; + bind(IsInflated); if (EmitSync & 64) { // If m->owner != null goto IsLocked // Pessimistic form: Test-and-CAS vs CAS // The optimistic form avoids RTS->RTO cache line upgrades. 
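The "Pessimistic form: Test-and-CAS vs CAS" comment above is worth unpacking: a CAS needs the cache line in exclusive (RTO) state even when it is doomed to fail, while a plain load keeps it shared (RTS). Reading _owner first therefore skips futile exclusive-mode traffic on a contended monitor. A minimal conceptual model, with illustrative types and field names rather than HotSpot's:

    #include <atomic>

    // Conceptual model only. Read owner first (shared cache-line access)
    // and attempt the CAS, which demands exclusive ownership of the line,
    // only when it can actually succeed.
    struct MonitorModel { std::atomic<void*> owner{nullptr}; };

    static bool test_and_cas_enter(MonitorModel& m, void* self) {
      if (m.owner.load(std::memory_order_relaxed) != nullptr)
        return false;                       // already locked: skip the CAS
      void* expected = nullptr;
      return m.owner.compare_exchange_strong(expected, self);
    }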
- ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); - andcc (Rscratch, Rscratch, G0) ; - brx (Assembler::notZero, false, Assembler::pn, done) ; - delayed()->nop() ; + ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); + andcc(Rscratch, Rscratch, G0); + brx(Assembler::notZero, false, Assembler::pn, done); + delayed()->nop(); // m->owner == null : it's unlocked. } // Try to CAS m->owner from null to Self // Invariant: if we acquire the lock then _recursions should be 0. - add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ; - mov (G2_thread, Rscratch) ; - casn (Rmark, G0, Rscratch) ; - cmp (Rscratch, G0) ; + add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); + mov(G2_thread, Rscratch); + casn(Rmark, G0, Rscratch); + cmp(Rscratch, G0); // Intentional fall-through into done } else { // Aggressively avoid the Store-before-CAS penalty @@ -3653,9 +3693,9 @@ Label IsInflated, Recursive ; // Anticipate CAS -- Avoid RTS->RTO upgrade -// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ; +// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); - ld_ptr (mark_addr, Rmark); // fetch obj->mark + ld_ptr(mark_addr, Rmark); // fetch obj->mark // Triage: biased, stack-locked, neutral, inflated if (try_bias) { @@ -3663,8 +3703,8 @@ // Invariant: if control reaches this point in the emitted stream // then Rmark has not been modified. } - andcc (Rmark, 2, G0) ; - brx (Assembler::notZero, false, Assembler::pn, IsInflated) ; + andcc(Rmark, 2, G0); + brx(Assembler::notZero, false, Assembler::pn, IsInflated); delayed()-> // Beware - dangling delay-slot // Try stack-lock acquisition. @@ -3674,23 +3714,21 @@ // ST obj->mark = box -- overwrite transient 0 value // This presumes TSO, of course. - mov (0, Rscratch) ; - or3 (Rmark, markOopDesc::unlocked_value, Rmark); - assert (mark_addr.disp() == 0, "cas must take a zero displacement"); - casn (mark_addr.base(), Rmark, Rscratch) ; -// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads) ; - cmp (Rscratch, Rmark) ; - brx (Assembler::notZero, false, Assembler::pn, Recursive) ; - delayed() -> - st_ptr (Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); + mov(0, Rscratch); + or3(Rmark, markOopDesc::unlocked_value, Rmark); + assert(mark_addr.disp() == 0, "cas must take a zero displacement"); + casn(mark_addr.base(), Rmark, Rscratch); +// prefetch (mark_addr, Assembler::severalWritesAndPossiblyReads); + cmp(Rscratch, Rmark); + brx(Assembler::notZero, false, Assembler::pn, Recursive); + delayed()->st_ptr(Rmark, Rbox, BasicLock::displaced_header_offset_in_bytes()); if (counters != NULL) { cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); } - br (Assembler::always, false, Assembler::pt, done); - delayed() -> - st_ptr (Rbox, mark_addr) ; + ba(done); + delayed()->st_ptr(Rbox, mark_addr); - bind (Recursive) ; + bind(Recursive); // Stack-lock attempt failed - check for recursive stack-lock. // Tests show that we can remove the recursive case with no impact // on refworkload 0.83. If we need to reduce the size of the code @@ -3707,49 +3745,48 @@ // RScratch contains the fetched obj->mark value from the failed CASN. 
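At this point Rscratch holds the mark word returned by the failed CAS, and the sub/andcc pair that follows implements the recursive stack-lock test: the fetched value is a BasicLock address, and the test passes only when that address lies a small, word-aligned distance above our own SP. Conceptually (ignoring the LP64 STACK_BIAS adjustment, with illustrative names):

    #include <stdint.h>

    // fetched_mark is the value the failed CAS returned (a BasicLock
    // address if some frame already stack-locked the object).
    static bool is_recursive_stack_lock(uintptr_t fetched_mark, uintptr_t sp) {
      uintptr_t delta = fetched_mark - sp;   // sub(Rscratch, SP, Rscratch)
      return (delta & 0xfffff003) == 0;      // andcc with the page-size mask
    }

The 0xfffff003 mask encodes both conditions at once: the difference must be below the page size (hence the "page size too small" assert) and must have its low two bits clear.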
#ifdef _LP64 - sub (Rscratch, STACK_BIAS, Rscratch); + sub(Rscratch, STACK_BIAS, Rscratch); #endif sub(Rscratch, SP, Rscratch); assert(os::vm_page_size() > 0xfff, "page size too small - change the constant"); - andcc (Rscratch, 0xfffff003, Rscratch); + andcc(Rscratch, 0xfffff003, Rscratch); if (counters != NULL) { // Accounting needs the Rscratch register - st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); + st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); cond_inc(Assembler::equal, (address) counters->fast_path_entry_count_addr(), Rmark, Rscratch); - br (Assembler::always, false, Assembler::pt, done) ; - delayed()->nop() ; + ba_short(done); } else { - br (Assembler::always, false, Assembler::pt, done) ; - delayed()-> st_ptr (Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); + ba(done); + delayed()->st_ptr(Rscratch, Rbox, BasicLock::displaced_header_offset_in_bytes()); } - bind (IsInflated) ; + bind (IsInflated); if (EmitSync & 64) { // If m->owner != null goto IsLocked // Test-and-CAS vs CAS // Pessimistic form avoids futile (doomed) CAS attempts // The optimistic form avoids RTS->RTO cache line upgrades. - ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); - andcc (Rscratch, Rscratch, G0) ; - brx (Assembler::notZero, false, Assembler::pn, done) ; - delayed()->nop() ; + ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); + andcc(Rscratch, Rscratch, G0); + brx(Assembler::notZero, false, Assembler::pn, done); + delayed()->nop(); // m->owner == null : it's unlocked. } // Try to CAS m->owner from null to Self // Invariant: if we acquire the lock then _recursions should be 0. - add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ; - mov (G2_thread, Rscratch) ; - casn (Rmark, G0, Rscratch) ; - cmp (Rscratch, G0) ; + add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); + mov(G2_thread, Rscratch); + casn(Rmark, G0, Rscratch); + cmp(Rscratch, G0); // ST box->displaced_header = NonZero. // Any non-zero value suffices: // unused_mark(), G2_thread, RBox, RScratch, rsp, etc. 
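The reason any non-zero value suffices for the store noted above: the unlock fast path begins by loading box->displaced_header and treats zero as "recursive stack lock, nothing to undo", so a non-zero marker is all that is needed to route unlocking through the full monitor path. A sketch of that invariant, with a hypothetical box layout:

    #include <stdint.h>

    // Hypothetical shape of the on-stack BasicLock box. A recursive stack
    // lock stores zero here; the unlock fast path loads this field and
    // takes the trivial exit on null, so any non-zero value forces the
    // full monitor exit path.
    struct BoxModel { uintptr_t displaced_header; };

    static bool unlock_is_trivial(const BoxModel* box) {
      return box->displaced_header == 0;
    }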
- st_ptr (Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes()); + st_ptr(Rbox, Rbox, BasicLock::displaced_header_offset_in_bytes()); // Intentional fall-through into done } - bind (done) ; + bind (done); } void MacroAssembler::compiler_unlock_object(Register Roop, Register Rmark, @@ -3760,7 +3797,7 @@ Label done ; if (EmitSync & 4) { - cmp (SP, G0) ; + cmp(SP, G0); return ; } @@ -3771,7 +3808,7 @@ // Test first if it is a fast recursive unlock ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rmark); - br_null(Rmark, false, Assembler::pt, done); + br_null_short(Rmark, Assembler::pt, done); // Check if it is still a light weight lock, this is is true if we see // the stack address of the basicLock in the markOop of the object @@ -3778,9 +3815,9 @@ assert(mark_addr.disp() == 0, "cas must take a zero displacement"); casx_under_lock(mark_addr.base(), Rbox, Rmark, (address)StubRoutines::Sparc::atomic_memory_operation_lock_addr()); - ba(done, false); + ba(done); delayed()->cmp(Rbox, Rmark); - bind (done) ; + bind(done); return ; } @@ -3795,14 +3832,14 @@ biased_locking_exit(mark_addr, Rscratch, done); } - ld_ptr (Roop, oopDesc::mark_offset_in_bytes(), Rmark) ; - ld_ptr (Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); - andcc (Rscratch, Rscratch, G0); - brx (Assembler::zero, false, Assembler::pn, done); - delayed()-> nop() ; // consider: relocate fetch of mark, above, into this DS - andcc (Rmark, 2, G0) ; - brx (Assembler::zero, false, Assembler::pt, LStacked) ; - delayed()-> nop() ; + ld_ptr(Roop, oopDesc::mark_offset_in_bytes(), Rmark); + ld_ptr(Rbox, BasicLock::displaced_header_offset_in_bytes(), Rscratch); + andcc(Rscratch, Rscratch, G0); + brx(Assembler::zero, false, Assembler::pn, done); + delayed()->nop(); // consider: relocate fetch of mark, above, into this DS + andcc(Rmark, 2, G0); + brx(Assembler::zero, false, Assembler::pt, LStacked); + delayed()->nop(); // It's inflated // Conceptually we need a #loadstore|#storestore "release" MEMBAR before @@ -3813,47 +3850,45 @@ // Note that we use 1-0 locking by default for the inflated case. We // close the resultant (and rare) race by having contented threads in // monitorenter periodically poll _owner. 
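The exit sequence emitted next folds its preconditions into flag arithmetic: xor3/orcc leaves zero only when _owner == self and _recursions == 0, and a second orcc checks that both EntryList and cxq are empty. As straight-line C++ (illustrative names; HotSpot reads these fields via ld_ptr at fixed offsets):

    #include <stdint.h>

    // Fast inflated exit is legal only when we own the monitor, it is not
    // recursively held, and no thread is queued on either wait list.
    static bool fast_inflated_exit_ok(uintptr_t owner, uintptr_t self,
                                      uintptr_t recursions,
                                      uintptr_t entry_list, uintptr_t cxq) {
      if (((owner ^ self) | recursions) != 0)  // xor3 + orcc
        return false;                          // not ours, or recursive
      return (entry_list | cxq) == 0;          // orcc: no queued threads
    }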
- ld_ptr (Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); - ld_ptr (Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox); - xor3 (Rscratch, G2_thread, Rscratch) ; - orcc (Rbox, Rscratch, Rbox) ; - brx (Assembler::notZero, false, Assembler::pn, done) ; + ld_ptr(Rmark, ObjectMonitor::owner_offset_in_bytes() - 2, Rscratch); + ld_ptr(Rmark, ObjectMonitor::recursions_offset_in_bytes() - 2, Rbox); + xor3(Rscratch, G2_thread, Rscratch); + orcc(Rbox, Rscratch, Rbox); + brx(Assembler::notZero, false, Assembler::pn, done); delayed()-> - ld_ptr (Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch); - ld_ptr (Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox); - orcc (Rbox, Rscratch, G0) ; + ld_ptr(Rmark, ObjectMonitor::EntryList_offset_in_bytes() - 2, Rscratch); + ld_ptr(Rmark, ObjectMonitor::cxq_offset_in_bytes() - 2, Rbox); + orcc(Rbox, Rscratch, G0); if (EmitSync & 65536) { Label LSucc ; - brx (Assembler::notZero, false, Assembler::pn, LSucc) ; - delayed()->nop() ; - ba (done, false) ; - delayed()-> - st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); + brx(Assembler::notZero, false, Assembler::pn, LSucc); + delayed()->nop(); + ba(done); + delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); - bind (LSucc) ; - st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); - if (os::is_MP()) { membar (StoreLoad) ; } - ld_ptr (Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch); - andcc (Rscratch, Rscratch, G0) ; - brx (Assembler::notZero, false, Assembler::pt, done) ; - delayed()-> andcc (G0, G0, G0) ; - add (Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark) ; - mov (G2_thread, Rscratch) ; - casn (Rmark, G0, Rscratch) ; + bind(LSucc); + st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); + if (os::is_MP()) { membar (StoreLoad); } + ld_ptr(Rmark, ObjectMonitor::succ_offset_in_bytes() - 2, Rscratch); + andcc(Rscratch, Rscratch, G0); + brx(Assembler::notZero, false, Assembler::pt, done); + delayed()->andcc(G0, G0, G0); + add(Rmark, ObjectMonitor::owner_offset_in_bytes()-2, Rmark); + mov(G2_thread, Rscratch); + casn(Rmark, G0, Rscratch); // invert icc.zf and goto done - br_notnull(Rscratch, false, Assembler::pt, done, false) ; - delayed() -> cmp (G0, G0) ; - ba (done, false); - delayed() -> cmp (G0, 1) ; + br_notnull(Rscratch, false, Assembler::pt, done); + delayed()->cmp(G0, G0); + ba(done); + delayed()->cmp(G0, 1); } else { - brx (Assembler::notZero, false, Assembler::pn, done) ; - delayed()->nop() ; - ba (done, false) ; - delayed()-> - st_ptr (G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); + brx(Assembler::notZero, false, Assembler::pn, done); + delayed()->nop(); + ba(done); + delayed()->st_ptr(G0, Rmark, ObjectMonitor::owner_offset_in_bytes() - 2); } - bind (LStacked) ; + bind (LStacked); // Consider: we could replace the expensive CAS in the exit // path with a simple ST of the displaced mark value fetched from // the on-stack basiclock box. That admits a race where a thread T2 @@ -3882,11 +3917,11 @@ // A prototype implementation showed excellent results, although // the scavenger and timeout code was rather involved. - casn (mark_addr.base(), Rbox, Rscratch) ; - cmp (Rbox, Rscratch); + casn(mark_addr.base(), Rbox, Rscratch); + cmp(Rbox, Rscratch); // Intentional fall through into done ... 
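The casn/cmp pair above is the stack-unlock fast path: swap the object's mark word from "address of our on-stack box" back to the displaced mark saved in the box, with a failed exchange meaning the lock was inflated in the meantime and the slow path must run. A conceptual model:

    #include <atomic>
    #include <stdint.h>

    // Swap the mark word back to the displaced mark held in the box;
    // compare_exchange failure means another thread changed the mark
    // (e.g. by inflating the lock) since we acquired it.
    static bool stack_unlock(std::atomic<uintptr_t>& mark, uintptr_t box,
                             uintptr_t displaced_mark) {
      uintptr_t expected = box;
      return mark.compare_exchange_strong(expected, displaced_mark);
    }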
- bind (done) ; + bind(done); } @@ -3942,7 +3977,7 @@ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); ld_ptr(G2_thread, in_bytes(JavaThread::tlab_start_offset()), t2); or3(t1, t2, t3); - cmp_and_br(t1, t2, Assembler::greaterEqual, false, Assembler::pn, next); + cmp_and_br_short(t1, t2, Assembler::greaterEqual, Assembler::pn, next); stop("assert(top >= start)"); should_not_reach_here(); @@ -3950,13 +3985,13 @@ ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), t1); ld_ptr(G2_thread, in_bytes(JavaThread::tlab_end_offset()), t2); or3(t3, t2, t3); - cmp_and_br(t1, t2, Assembler::lessEqual, false, Assembler::pn, next2); + cmp_and_br_short(t1, t2, Assembler::lessEqual, Assembler::pn, next2); stop("assert(top <= end)"); should_not_reach_here(); bind(next2); and3(t3, MinObjAlignmentInBytesMask, t3); - cmp_and_br(t3, 0, Assembler::lessEqual, false, Assembler::pn, ok); + cmp_and_br_short(t3, 0, Assembler::lessEqual, Assembler::pn, ok); stop("assert(aligned)"); should_not_reach_here(); @@ -3982,7 +4017,7 @@ if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { // No allocation in the shared eden. - ba(slow_case); + ba_short(slow_case); } else { // get eden boundaries // note: we need both top & top_addr! @@ -4116,7 +4151,7 @@ if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) { // No allocation in the shared eden. - ba(slow_case); + ba_short(slow_case); } ld_ptr(G2_thread, in_bytes(JavaThread::tlab_top_offset()), top); @@ -4141,7 +4176,7 @@ add(t2, 1, t2); stw(t2, G2_thread, in_bytes(JavaThread::tlab_slow_allocations_offset())); } - ba(try_eden); + ba_short(try_eden); bind(discard_tlab); if (TLABStats) { @@ -4157,7 +4192,7 @@ // if tlab is currently allocated (top or end != null) then // fill [top, end + alignment_reserve) with array object - br_null(top, false, Assembler::pn, do_refill); + br_null_short(top, Assembler::pn, do_refill); set((intptr_t)markOopDesc::prototype()->copy_set_hash(0x2), t2); st_ptr(t2, top, oopDesc::mark_offset_in_bytes()); // set up the mark word @@ -4192,7 +4227,7 @@ Label ok; ld_ptr(G2_thread, in_bytes(JavaThread::tlab_size_offset()), t2); sll_ptr(t2, LogHeapWordSize, t2); - cmp_and_br(t1, t2, Assembler::equal, false, Assembler::pn, ok); + cmp_and_br_short(t1, t2, Assembler::equal, Assembler::pt, ok); stop("assert(t1 == tlab_size)"); should_not_reach_here(); @@ -4203,7 +4238,7 @@ sub(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes(), top); st_ptr(top, G2_thread, in_bytes(JavaThread::tlab_end_offset())); verify_tlab(); - ba(retry); + ba_short(retry); } void MacroAssembler::incr_allocated_bytes(RegisterOrConstant size_in_bytes, @@ -4354,7 +4389,7 @@ __ bind(restart); __ ld_ptr(G2_thread, satb_q_index_byte_offset, L0); - __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, L0, refill, false); + __ br_on_reg_cond(Assembler::rc_z, /*annul*/false, Assembler::pn, L0, refill); // If the branch is taken, no harm in executing this in the delay slot. __ delayed()->ld_ptr(G2_thread, satb_q_buf_byte_offset, L1); __ sub(L0, oopSize, L0); @@ -4469,6 +4504,7 @@ // Check on whether to annul. br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, tmp, filtered); + delayed()->nop(); // Do we need to load the previous value? if (obj != noreg) { @@ -4492,6 +4528,7 @@ // Is the previous value null? // Check on whether to annul. br_on_reg_cond(rc_z, /*annul*/false, Assembler::pt, pre_val, filtered); + delayed()->nop(); // OK, it's not filtered, so we'll need to call enqueue. 
In the normal
  // case, pre_val will be a scratch G-reg, but there are some cases in
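Stepping back to the allocation changes earlier in this hunk: both eden_allocate and tlab_allocate are bump-pointer allocators over a [top, end) region; the shared-eden case must retry with a CAS on the global top, while the TLAB case can use plain stores since the buffer is thread-local. A sketch of the CAS-retry variant under those assumptions (names illustrative, matching the structure rather than the emitted code):

    #include <atomic>
    #include <stddef.h>
    #include <stdint.h>

    // Returns the start address of the new object, or 0 to send the
    // caller to the slow case, mirroring the slow_case branch above.
    static uintptr_t eden_allocate(std::atomic<uintptr_t>& top,
                                   uintptr_t end, size_t size) {
      for (;;) {
        uintptr_t old_top = top.load(std::memory_order_relaxed);
        uintptr_t new_top = old_top + size;
        if (new_top > end) return 0;                      // past heap end
        if (top.compare_exchange_weak(old_top, new_top))  // casn-style retry
          return old_top;
      }
    }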