
src/cpu/aarch64/vm/macroAssembler_aarch64.cpp

rev 10437 : 8151775: aarch64: add support for 8.1 LSE atomic operations
Reviewed-by: aph

*** 1635,1644 ****
--- 1635,1649 ----
    add(Rd, base, Rd);
    return Address(Rd);
  }

  void MacroAssembler::atomic_incw(Register counter_addr, Register tmp, Register tmp2) {
+   if (UseLSE) {
+     mov(tmp, 1);
+     ldadd(Assembler::word, tmp, zr, counter_addr);
+     return;
+   }
    Label retry_load;
    prfm(Address(counter_addr), PSTL1STRM);
    bind(retry_load);
    // flush and load exclusive from the memory location
    ldxrw(tmp, counter_addr);
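The hunk above adds a fast path to atomic_incw: when UseLSE is set, a single ldadd performs the increment atomically and the old value is discarded into zr, so the ldxrw/stxrw retry loop that follows is skipped entirely. A minimal standalone sketch of the same idea, assuming a GCC or Clang toolchain targeting -march=armv8.1-a (illustration only, not part of this patch):

    // Illustration only, not HotSpot code. With -march=armv8.1-a,
    // GCC/Clang lower this builtin to a single LDADD instruction
    // rather than an LDXR/STXR retry loop.
    #include <cstdint>

    void incw(uint32_t* counter) {
      // Result is discarded, mirroring ldadd(..., zr, counter_addr).
      (void)__atomic_fetch_add(counter, 1u, __ATOMIC_RELAXED);
    }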
*** 2170,2181 ****
      return a != c;
    else
      return a != b.as_register() && a != c && b.as_register() != c;
  }

! #define ATOMIC_OP(LDXR, OP, IOP, STXR)                                 \
  void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Register addr) { \
    Register result = rscratch2;                                         \
    if (prev->is_valid())                                                \
      result = different(prev, incr, addr) ? prev : rscratch2;           \
                                                                         \
    Label retry_load;                                                    \
--- 2175,2196 ----
      return a != c;
    else
      return a != b.as_register() && a != c && b.as_register() != c;
  }

! #define ATOMIC_OP(LDXR, OP, IOP, AOP, STXR, sz)                        \
  void MacroAssembler::atomic_##OP(Register prev, RegisterOrConstant incr, Register addr) { \
+   if (UseLSE) {                                                        \
+     prev = prev->is_valid() ? prev : zr;                               \
+     if (incr.is_register()) {                                          \
+       AOP(sz, incr.as_register(), prev, addr);                         \
+     } else {                                                           \
+       mov(rscratch2, incr.as_constant());                              \
+       AOP(sz, rscratch2, prev, addr);                                  \
+     }                                                                  \
+     return;                                                            \
+   }                                                                    \
    Register result = rscratch2;                                         \
    if (prev->is_valid())                                                \
      result = different(prev, incr, addr) ? prev : rscratch2;           \
                                                                         \
    Label retry_load;                                                    \
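For readability, here is roughly what the xword instantiation shown below, ATOMIC_OP(ldxr, add, sub, ldadd, stxr, Assembler::xword), expands to on the LSE path. This is a sketch derived from the macro body above, not a literal preprocessor dump:

    // Sketch of the expanded LSE fast path for atomic_add (64-bit).
    void MacroAssembler::atomic_add(Register prev, RegisterOrConstant incr, Register addr) {
      if (UseLSE) {
        // Discard the old value into zr if the caller does not want it.
        prev = prev->is_valid() ? prev : zr;
        if (incr.is_register()) {
          ldadd(Assembler::xword, incr.as_register(), prev, addr);
        } else {
          mov(rscratch2, incr.as_constant());   // materialize the immediate
          ldadd(Assembler::xword, rscratch2, prev, addr);
        }
        return;
      }
      // ... otherwise fall through to the ldxr/stxr retry loop above.
    }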
*** 2188,2204 ****
    if (prev->is_valid() && prev != result) {                            \
      IOP(prev, rscratch1, incr);                                        \
    }                                                                    \
  }

! ATOMIC_OP(ldxr, add, sub, stxr)
! ATOMIC_OP(ldxrw, addw, subw, stxrw)

  #undef ATOMIC_OP

! #define ATOMIC_XCHG(OP, LDXR, STXR)                                    \
  void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
    Register result = rscratch2;                                         \
    if (prev->is_valid())                                                \
      result = different(prev, newv, addr) ? prev : rscratch2;           \
                                                                         \
    Label retry_load;                                                    \
--- 2203,2224 ----
    if (prev->is_valid() && prev != result) {                            \
      IOP(prev, rscratch1, incr);                                        \
    }                                                                    \
  }

! ATOMIC_OP(ldxr, add, sub, ldadd, stxr, Assembler::xword)
! ATOMIC_OP(ldxrw, addw, subw, ldadd, stxrw, Assembler::word)

  #undef ATOMIC_OP

! #define ATOMIC_XCHG(OP, LDXR, STXR, sz)                                \
  void MacroAssembler::atomic_##OP(Register prev, Register newv, Register addr) { \
+   if (UseLSE) {                                                        \
+     prev = prev->is_valid() ? prev : zr;                               \
+     swp(sz, newv, prev, addr);                                         \
+     return;                                                            \
+   }                                                                    \
    Register result = rscratch2;                                         \
    if (prev->is_valid())                                                \
      result = different(prev, newv, addr) ? prev : rscratch2;           \
                                                                         \
    Label retry_load;                                                    \
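The ATOMIC_XCHG change follows the same pattern: with UseLSE, a single swp instruction exchanges newv with the memory word and returns the old value in prev, replacing the exclusive-load/store loop. A standalone sketch, again assuming a GCC/Clang toolchain with -march=armv8.1-a (illustration only, not part of this patch):

    // Illustration only, not HotSpot code. With -march=armv8.1-a,
    // GCC/Clang lower this builtin to a single SWP instruction.
    #include <cstdint>

    uint64_t xchg(uint64_t* addr, uint64_t newv) {
      return __atomic_exchange_n(addr, newv, __ATOMIC_RELAXED);
    }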
*** 2209,2220 ****
    cbnzw(rscratch1, retry_load);                                        \
    if (prev->is_valid() && prev != result)                              \
      mov(prev, result);                                                 \
  }

! ATOMIC_XCHG(xchg, ldxr, stxr)
! ATOMIC_XCHG(xchgw, ldxrw, stxrw)

  #undef ATOMIC_XCHG

  void MacroAssembler::incr_allocated_bytes(Register thread,
                                            Register var_size_in_bytes,
--- 2229,2240 ----
    cbnzw(rscratch1, retry_load);                                        \
    if (prev->is_valid() && prev != result)                              \
      mov(prev, result);                                                 \
  }

! ATOMIC_XCHG(xchg, ldxr, stxr, Assembler::xword)
! ATOMIC_XCHG(xchgw, ldxrw, stxrw, Assembler::word)

  #undef ATOMIC_XCHG

  void MacroAssembler::incr_allocated_bytes(Register thread,
                                            Register var_size_in_bytes,
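As with ATOMIC_OP, the instantiations now pass the operand size, so atomic_xchg uses Assembler::xword and atomic_xchgw uses Assembler::word. Expanded, the word variant's LSE path reads approximately as follows (a sketch derived from the macro body, not a literal preprocessor dump):

    // Sketch of the expanded LSE fast path for atomic_xchgw (32-bit).
    void MacroAssembler::atomic_xchgw(Register prev, Register newv, Register addr) {
      if (UseLSE) {
        prev = prev->is_valid() ? prev : zr;   // discard old value if unwanted
        swp(Assembler::word, newv, prev, addr);
        return;
      }
      // ... otherwise the ldxrw/stxrw retry loop shown above.
    }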