2180 if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
2181 prfm(Address(addr), PSTL1STRM);
2182 bind(retry_load);
2183 load_exclusive(result, addr, size, acquire);
2184 if (size == xword)
2185 cmp(result, expected);
2186 else
2187 cmpw(result, expected);
2188 br(Assembler::NE, done);
2189 store_exclusive(rscratch1, new_val, addr, size, release);
2190 if (weak) {
2191 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
2192 } else {
2193 cbnzw(rscratch1, retry_load);
2194 }
2195 bind(done);
2196 BLOCK_COMMENT("} cmpxchg");
2197 }
2198 }
2199
// Emit a Shenandoah-aware compare-and-swap of an oop at [addr], using an
// LL/SC (load-exclusive / store-exclusive) loop.
//
// On a raw-value mismatch, Shenandoah may still allow the CAS to succeed:
// both the memory value and the expected value are pushed through the
// interpreter read barrier (after decoding, when 'narrow'), and if their
// to-space forms are equal the CAS is retried with the freshly loaded
// memory value as the new 'expected'.
//
// Inputs:
//   res      - receives 1 on success, 0 on failure
//   addr     - address of the oop field
//   expected - expected raw value; CLOBBERED on the mismatch path
//   new_val  - value to store
//   narrow   - true for compressed (32-bit) oops
//   tmp1/tmp2 - scratch; clobbered
// NOTE(review): 'acquire' and 'release' are not used -- both exclusive
// accesses are emitted with 'true' and an unconditional membar(AnyAny)
// follows; confirm this full-fence behavior is intended.
2200 void MacroAssembler::cmpxchg_oop_shenandoah(Register res, Register addr, Register expected,
2201 Register new_val,
2202 bool narrow,
2203 bool acquire, bool release,
2204 Register tmp1, Register tmp2) {
2205 assert(UseShenandoahGC, "only for shenandoah");
2206 assert_different_registers(res, addr, expected, new_val, tmp1, tmp2);
2207
2208 Label retry, done, fail;
2209
2210 mov(res, 0); // Pre-set failure result; overwritten with 1 on success.
2211
2212 // CAS, using LL/SC pair.
2213 bind(retry);
2214 load_exclusive(tmp1, addr, narrow ? word : xword, true); // acquire forced to 'true'
2215 if (narrow) cmpw(tmp1, expected);
2216 else cmp(tmp1, expected);
2217 br(Assembler::NE, fail); // Raw mismatch: try the read-barrier comparison below.
2218 store_exclusive(tmp2, new_val, addr, narrow ? word : xword, true); // release forced to 'true'
2219 cbnzw(tmp2, retry); // Nonzero status = store-exclusive lost the reservation; retry.
2220 mov(res, 1);
2221 b(done);
2222
2223 bind(fail);
2224 // Check if rb(expected)==rb(tmp1)
2225 // Shuffle registers so that we have memory value ready for next expected.
2226 mov(tmp2, expected); // tmp2 := old expected (to be barrier-compared)
2227 mov(expected, tmp1); // expected := value just loaded from memory
2228 if (narrow) {
2229 decode_heap_oop(tmp1, tmp1); // Decompress before applying the read barrier.
2230 decode_heap_oop(tmp2, tmp2);
2231 }
2232 oopDesc::bs()->interpreter_read_barrier(this, tmp1);
2233 oopDesc::bs()->interpreter_read_barrier(this, tmp2);
2234 cmp(tmp1, tmp2);
2235 // Retry with expected now being the value we just loaded from addr.
2236 br(Assembler::EQ, retry); // To-space forms equal: CAS again with updated expected.
2237
2238 bind(done);
2239 membar(AnyAny); // Unconditional full fence regardless of acquire/release flags.
2240 }
2241
2242 static bool different(Register a, RegisterOrConstant b, Register c) {
2243 if (b.is_constant())
2244 return a != c;
2245 else
2246 return a != b.as_register() && a != c && b.as_register() != c;
2247 }
2248
2249 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \
2250 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
2251 if (UseLSE) { \
2252 prev = prev->is_valid() ? prev : zr; \
2253 if (incr.is_register()) { \
2254 AOP(sz, incr.as_register(), prev, addr); \
2255 } else { \
2256 mov(rscratch2, incr.as_constant()); \
2257 AOP(sz, rscratch2, prev, addr); \
2258 } \
2259 return; \
|
2180 if ((VM_Version::features() & VM_Version::CPU_STXR_PREFETCH))
2181 prfm(Address(addr), PSTL1STRM);
2182 bind(retry_load);
2183 load_exclusive(result, addr, size, acquire);
2184 if (size == xword)
2185 cmp(result, expected);
2186 else
2187 cmpw(result, expected);
2188 br(Assembler::NE, done);
2189 store_exclusive(rscratch1, new_val, addr, size, release);
2190 if (weak) {
2191 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
2192 } else {
2193 cbnzw(rscratch1, retry_load);
2194 }
2195 bind(done);
2196 BLOCK_COMMENT("} cmpxchg");
2197 }
2198 }
2199
// Emit a Shenandoah-aware compare-and-swap of an oop at [addr], using an
// LL/SC (load-exclusive / store-exclusive) loop.
//
// On a raw-value mismatch, the memory value and the expected value are
// both pushed through the interpreter read barrier (after decoding, when
// size == word) and, if their to-space forms are equal, the CAS is
// retried with the freshly loaded memory value as the new 'expected'.
//
// Inputs:
//   addr     - address of the oop field
//   expected - expected raw value; CLOBBERED on the mismatch path
//   new_val  - value to store
//   size     - word for compressed oops, xword otherwise
//   acquire/release - ordering for the exclusive load/store
//   weak     - if true, emit a single attempt (no retry on a failed
//              store-exclusive); the store status is turned into flags
//   res      - result register, or noreg (rscratch1 is then used)
//   tmp2     - scratch; clobbered
//
// On exit the condition flags indicate the outcome (EQ = success, NE =
// failure); 'result' holds the witnessed value, re-compressed for the
// narrow cmp-and-exchange case when the caller supplied 'res'.
2200 void MacroAssembler::cmpxchg_oop_shenandoah(Register addr, Register expected,
2201 Register new_val,
2202 enum operand_size size,
2203 bool acquire, bool release,
2204 bool weak,
2205 Register res, Register tmp2) {
2206 assert(UseShenandoahGC, "only for shenandoah");
2207 Register result = res;
2208 if (result == noreg) result = rscratch1; // Caller wants flags only.
2209
2210 assert_different_registers(addr, expected, new_val, result, tmp2);
2211
2212 Label retry, done, fail;
2213
2214 // CAS, using LL/SC pair.
2215 bind(retry);
2216 load_exclusive(result, addr, size, acquire);
2217 if (size == xword) {
2218 cmp(result, expected);
2219 } else {
2220 cmpw(result, expected);
2221 }
2222 br(Assembler::NE, fail); // Raw mismatch: try the read-barrier comparison below.
2223 store_exclusive(tmp2, new_val, addr, size, release);
2224 if (weak) {
2225 cmpw(tmp2, 0u); // If the store fails, return NE to our caller
2226 } else {
2227 cbnzw(tmp2, retry); // Nonzero status = lost the reservation; retry.
2228 }
2229 b(done); // Flags here are EQ on success (NE possible only for weak).
2230
2231 bind(fail);
2232 // Check if rb(expected)==rb(result)
2233 // Shuffle registers so that we have memory value ready for next expected.
2234 mov(tmp2, expected); // tmp2 := old expected (to be barrier-compared)
2235 mov(expected, result); // expected := value just loaded from memory
2236 if (size == word) {
2237 decode_heap_oop(result, result); // Decompress before applying the read barrier.
2238 decode_heap_oop(tmp2, tmp2);
2239 }
2240 oopDesc::bs()->interpreter_read_barrier(this, result);
2241 oopDesc::bs()->interpreter_read_barrier(this, tmp2);
2242 cmp(result, tmp2);
2243 // Retry with expected now being the value we just loaded from addr.
2244 br(Assembler::EQ, retry); // Falls through with NE set on a true failure.
2245 if (size == word && res != noreg) {
2246 // For cmp-and-exchange and narrow oops, we need to restore
2247 // the compressed old-value.
2248 mov(result, expected);
2249 }
2250 bind(done);
2251 }
2252
2253 static bool different(Register a, RegisterOrConstant b, Register c) {
2254 if (b.is_constant())
2255 return a != c;
2256 else
2257 return a != b.as_register() && a != c && b.as_register() != c;
2258 }
2259
2260 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \
2261 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
2262 if (UseLSE) { \
2263 prev = prev->is_valid() ? prev : zr; \
2264 if (incr.is_register()) { \
2265 AOP(sz, incr.as_register(), prev, addr); \
2266 } else { \
2267 mov(rscratch2, incr.as_constant()); \
2268 AOP(sz, rscratch2, prev, addr); \
2269 } \
2270 return; \
|