< prev index next >
src/cpu/aarch64/vm/c1_LIRAssembler_aarch64.cpp
Print this page
rev 10528 : 8151775: aarch64: add support for 8.1 LSE atomic operations
Reviewed-by: aph
*** 1554,1611 ****
ShouldNotReachHere();
}
}
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
! if (UseLSE) {
! __ mov(rscratch1, cmpval);
! __ casal(Assembler::word, rscratch1, newval, addr);
! __ cmpw(rscratch1, cmpval);
__ cset(rscratch1, Assembler::NE);
- } else {
- Label retry_load, nope;
- // flush and load exclusive from the memory location
- // and fail if it is not what we expect
- __ prfm(Address(addr), PSTL1STRM);
- __ bind(retry_load);
- __ ldaxrw(rscratch1, addr);
- __ cmpw(rscratch1, cmpval);
- __ cset(rscratch1, Assembler::NE);
- __ br(Assembler::NE, nope);
- // if we store+flush with no intervening write rscratch1 will be zero
- __ stlxrw(rscratch1, newval, addr);
- // retry so we only ever return after a load fails to compare
- // ensures we don't return a stale value after a failed write.
- __ cbnzw(rscratch1, retry_load);
- __ bind(nope);
- }
__ membar(__ AnyAny);
}
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
! if (UseLSE) {
! __ mov(rscratch1, cmpval);
! __ casal(Assembler::xword, rscratch1, newval, addr);
! __ cmp(rscratch1, cmpval);
__ cset(rscratch1, Assembler::NE);
- } else {
- Label retry_load, nope;
- // flush and load exclusive from the memory location
- // and fail if it is not what we expect
- __ prfm(Address(addr), PSTL1STRM);
- __ bind(retry_load);
- __ ldaxr(rscratch1, addr);
- __ cmp(rscratch1, cmpval);
- __ cset(rscratch1, Assembler::NE);
- __ br(Assembler::NE, nope);
- // if we store+flush with no intervening write rscratch1 will be zero
- __ stlxr(rscratch1, newval, addr);
- // retry so we only ever return after a load fails to compare
- // ensures we don't return a stale value after a failed write.
- __ cbnz(rscratch1, retry_load);
- __ bind(nope);
- }
__ membar(__ AnyAny);
}
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
--- 1554,1571 ----
ShouldNotReachHere();
}
}
void LIR_Assembler::casw(Register addr, Register newval, Register cmpval) {
! __ cmpxchg(addr, cmpval, newval, Assembler::word, /* acquire*/ true, /* release*/ true, rscratch1);
__ cset(rscratch1, Assembler::NE);
__ membar(__ AnyAny);
}
void LIR_Assembler::casl(Register addr, Register newval, Register cmpval) {
! __ cmpxchg(addr, cmpval, newval, Assembler::xword, /* acquire*/ true, /* release*/ true, rscratch1);
__ cset(rscratch1, Assembler::NE);
__ membar(__ AnyAny);
}
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
*** 3119,3160 ****
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
Address addr = as_Address(src->as_address_ptr(), noreg);
BasicType type = src->type();
bool is_oop = type == T_OBJECT || type == T_ARRAY;
! void (MacroAssembler::* lda)(Register Rd, Register Ra);
! void (MacroAssembler::* add)(Register Rd, Register Rn, RegisterOrConstant increment);
! void (MacroAssembler::* stl)(Register Rs, Register Rt, Register Rn);
switch(type) {
case T_INT:
! lda = &MacroAssembler::ldaxrw;
! add = &MacroAssembler::addw;
! stl = &MacroAssembler::stlxrw;
break;
case T_LONG:
! lda = &MacroAssembler::ldaxr;
! add = &MacroAssembler::add;
! stl = &MacroAssembler::stlxr;
break;
case T_OBJECT:
case T_ARRAY:
if (UseCompressedOops) {
! lda = &MacroAssembler::ldaxrw;
! add = &MacroAssembler::addw;
! stl = &MacroAssembler::stlxrw;
! } else {
! lda = &MacroAssembler::ldaxr;
! add = &MacroAssembler::add;
! stl = &MacroAssembler::stlxr;
}
break;
default:
ShouldNotReachHere();
! lda = &MacroAssembler::ldaxr;
! add = &MacroAssembler::add;
! stl = &MacroAssembler::stlxr; // unreachable
}
switch (code) {
case lir_xadd:
{
--- 3079,3114 ----
void LIR_Assembler::atomic_op(LIR_Code code, LIR_Opr src, LIR_Opr data, LIR_Opr dest, LIR_Opr tmp_op) {
Address addr = as_Address(src->as_address_ptr(), noreg);
BasicType type = src->type();
bool is_oop = type == T_OBJECT || type == T_ARRAY;
! void (MacroAssembler::* add)(Register prev, RegisterOrConstant incr, Register addr);
! void (MacroAssembler::* xchg)(Register prev, Register newv, Register addr);
switch(type) {
case T_INT:
! xchg = &MacroAssembler::atomic_xchgalw;
! add = &MacroAssembler::atomic_addalw;
break;
case T_LONG:
! xchg = &MacroAssembler::atomic_xchgal;
! add = &MacroAssembler::atomic_addal;
break;
case T_OBJECT:
case T_ARRAY:
if (UseCompressedOops) {
! xchg = &MacroAssembler::atomic_xchgalw;
! add = &MacroAssembler::atomic_addalw;
! } else {
! xchg = &MacroAssembler::atomic_xchgal;
! add = &MacroAssembler::atomic_addal;
}
break;
default:
ShouldNotReachHere();
! xchg = &MacroAssembler::atomic_xchgal;
! add = &MacroAssembler::atomic_addal; // unreachable
}
switch (code) {
case lir_xadd:
{
*** 3168,3185 ****
} else {
inc = RegisterOrConstant(as_reg(data));
assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
rscratch1, rscratch2);
}
- Label again;
__ lea(tmp, addr);
! __ prfm(Address(tmp), PSTL1STRM);
! __ bind(again);
! (_masm->*lda)(dst, tmp);
! (_masm->*add)(rscratch1, dst, inc);
! (_masm->*stl)(rscratch2, rscratch1, tmp);
! __ cbnzw(rscratch2, again);
break;
}
case lir_xchg:
{
Register tmp = tmp_op->as_register();
--- 3122,3133 ----
} else {
inc = RegisterOrConstant(as_reg(data));
assert_different_registers(inc.as_register(), dst, addr.base(), tmp,
rscratch1, rscratch2);
}
__ lea(tmp, addr);
! (_masm->*add)(dst, inc, tmp);
break;
}
case lir_xchg:
{
Register tmp = tmp_op->as_register();
*** 3188,3204 ****
if (is_oop && UseCompressedOops) {
__ encode_heap_oop(rscratch1, obj);
obj = rscratch1;
}
assert_different_registers(obj, addr.base(), tmp, rscratch2, dst);
- Label again;
__ lea(tmp, addr);
! __ prfm(Address(tmp), PSTL1STRM);
! __ bind(again);
! (_masm->*lda)(dst, tmp);
! (_masm->*stl)(rscratch2, obj, tmp);
! __ cbnzw(rscratch2, again);
if (is_oop && UseCompressedOops) {
__ decode_heap_oop(dst);
}
}
break;
--- 3136,3147 ----
if (is_oop && UseCompressedOops) {
__ encode_heap_oop(rscratch1, obj);
obj = rscratch1;
}
assert_different_registers(obj, addr.base(), tmp, rscratch2, dst);
__ lea(tmp, addr);
! (_masm->*xchg)(dst, obj, tmp);
if (is_oop && UseCompressedOops) {
__ decode_heap_oop(dst);
}
}
break;
< prev index next >