2185 cmp(result, expected);
2186 else
2187 cmpw(result, expected);
2188 br(Assembler::NE, done);
2189 store_exclusive(rscratch1, new_val, addr, size, release);
2190 if (weak) {
2191 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
2192 } else {
2193 cbnzw(rscratch1, retry_load);
2194 }
2195 bind(done);
2196 BLOCK_COMMENT("} cmpxchg");
2197 }
2198 }
2199
// Emit a CAS of an oop at [addr] for Shenandoah GC, built from an LL/SC
// (load_exclusive / store_exclusive) retry loop.
//
//   addr     - register holding the field address
//   expected - value the caller expects at [addr]; clobbered on the
//              false-failure path (replaced by the value read from memory)
//   new_val  - value to store on success
//   size     - word for narrow (compressed) oops, xword for full oops
//   acquire/release - ordering flags forwarded to the exclusive accesses
//   weak     - if true, a failed store-exclusive reports NE to the caller
//              instead of looping back to retry
//   res      - result register; noreg means plain compare-and-swap and
//              rscratch1 is used internally
//   tmp2     - scratch: store-exclusive status and barrier temporary
//
// On exit the condition flags hold EQ on success, NE on failure.
2200 void MacroAssembler::cmpxchg_oop_shenandoah(Register addr, Register expected,
2201 Register new_val,
2202 enum operand_size size,
2203 bool acquire, bool release,
2204 bool weak,
2205 Register res, Register tmp2) {
2206 assert(UseShenandoahGC, "only for shenandoah");
2207 Register result = res;
2208 if (result == noreg) result = rscratch1;
2209
2210 assert_different_registers(addr, expected, new_val, result, tmp2);
2211
2212 Label retry, done, fail;
2213
2214 // CAS, using LL/SC pair.
2215 bind(retry);
2216 load_exclusive(result, addr, size, acquire);
2217 if (size == xword) {
2218 cmp(result, expected);
2219 } else {
// Narrow oops compare as 32-bit values.
2220 cmpw(result, expected);
2221 }
// Mismatch may still be two copies of the same object; sort that out below.
2222 br(Assembler::NE, fail);
2223 store_exclusive(tmp2, new_val, addr, size, release);
2224 if (weak) {
2225 cmpw(tmp2, 0u); // If the store fails, return NE to our caller
2226 } else {
2227 cbnzw(tmp2, retry);
2228 }
// Success path ends with EQ in the flags.
2229 b(done);
2230
2231 bind(fail);
2232 // Check if rb(expected)==rb(result)
2233 // Shuffle registers so that we have memory value ready for next expected.
2234 mov(tmp2, expected);
2235 mov(expected, result);
2236 if (size == word) {
// Read barriers below operate on uncompressed oops.
2237 decode_heap_oop(result, result);
2238 decode_heap_oop(tmp2, tmp2);
2239 }
// Resolve both values through the Shenandoah read barrier; if they are the
// same object the failure was spurious (different copies) and we retry with
// the freshly loaded memory value as the new 'expected'.
2240 oopDesc::bs()->interpreter_read_barrier(this, result);
2241 oopDesc::bs()->interpreter_read_barrier(this, tmp2);
2242 cmp(result, tmp2);
2243 // Retry with expected now being the value we just loaded from addr.
2244 br(Assembler::EQ, retry);
2245 if (size == word && res != noreg) {
2246 // For cmp-and-exchange and narrow oops, we need to restore
2247 // the compressed old-value.
2248 mov(result, expected);
2249 }
2250 bind(done);
2251 }
2252
2253 static bool different(Register a, RegisterOrConstant b, Register c) {
2254 if (b.is_constant())
2255 return a != c;
2256 else
2257 return a != b.as_register() && a != c && b.as_register() != c;
2258 }
2259
2260 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \
2261 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
2262 if (UseLSE) { \
2263 prev = prev->is_valid() ? prev : zr; \
2264 if (incr.is_register()) { \
2265 AOP(sz, incr.as_register(), prev, addr); \
2266 } else { \
2267 mov(rscratch2, incr.as_constant()); \
|
2185 cmp(result, expected);
2186 else
2187 cmpw(result, expected);
2188 br(Assembler::NE, done);
2189 store_exclusive(rscratch1, new_val, addr, size, release);
2190 if (weak) {
2191 cmpw(rscratch1, 0u); // If the store fails, return NE to our caller.
2192 } else {
2193 cbnzw(rscratch1, retry_load);
2194 }
2195 bind(done);
2196 BLOCK_COMMENT("} cmpxchg");
2197 }
2198 }
2199
// Emit a CAS of an oop at [addr] for Shenandoah GC, built from an LL/SC
// (load_exclusive / store_exclusive) retry loop.
//
//   addr     - register holding the field address
//   expected - value the caller expects at [addr]; clobbered on the
//              false-failure path (replaced by the value read from memory)
//   new_val  - value to store on success
//   size     - word for narrow (compressed) oops, xword for full oops
//   acquire/release - ordering flags forwarded to the exclusive accesses
//   weak     - if true, a failed store-exclusive reports NE to the caller
//              instead of looping back to retry
//   result   - noreg selects plain compare-and-swap (rscratch1 is used
//              internally); any other register selects compare-and-exchange
//              and receives the old value
//   tmp2     - scratch: store-exclusive status and barrier temporary
//
// On exit the condition flags hold EQ on success, NE on failure.
2200 void MacroAssembler::cmpxchg_oop_shenandoah(Register addr, Register expected,
2201 Register new_val,
2202 enum operand_size size,
2203 bool acquire, bool release,
2204 bool weak,
2205 Register result, Register tmp2) {
2206 assert(UseShenandoahGC, "only for shenandoah");
2207 bool is_cae = (result != noreg);
2208 bool is_narrow = (size == word);
2209
2210 if (! is_cae) result = rscratch1;
2211
2212 assert_different_registers(addr, expected, new_val, result, tmp2);
2213
// Optional verification of the value being stored (debug/check mode).
2214 if (ShenandoahStoreCheck) {
2215 if (is_narrow) {
// shenandoah_store_check wants an uncompressed oop.
2216 decode_heap_oop(tmp2, new_val);
2217 shenandoah_store_check(addr, tmp2);
2218 } else {
2219 shenandoah_store_check(addr, new_val);
2220 }
2221 }
2222 Label retry, done, fail;
2223
2224 // CAS, using LL/SC pair.
2225 bind(retry);
2226 load_exclusive(result, addr, size, acquire);
2227 if (is_narrow) {
// Narrow oops compare as 32-bit values.
2228 cmpw(result, expected);
2229 } else {
2230 cmp(result, expected);
2231 }
// Mismatch may still be two copies of the same object; sort that out below.
2232 br(Assembler::NE, fail);
2233 store_exclusive(tmp2, new_val, addr, size, release);
2234 if (weak) {
2235 cmpw(tmp2, 0u); // If the store fails, return NE to our caller
2236 } else {
2237 cbnzw(tmp2, retry);
2238 }
// Success path ends with EQ in the flags.
2239 b(done);
2240
2241 bind(fail);
2242 // Check if rb(expected)==rb(result)
2243 // Shuffle registers so that we have memory value ready for next expected.
2244 mov(tmp2, expected);
2245 mov(expected, result);
2246 if (is_narrow) {
// Read barriers below operate on uncompressed oops.
2247 decode_heap_oop(result, result);
2248 decode_heap_oop(tmp2, tmp2);
2249 }
// Resolve both values through the Shenandoah read barrier; if they are the
// same object the failure was spurious (different copies) and we retry with
// the freshly loaded memory value as the new 'expected'.
2250 oopDesc::bs()->interpreter_read_barrier(this, result);
2251 oopDesc::bs()->interpreter_read_barrier(this, tmp2);
2252 cmp(result, tmp2);
2253 // Retry with expected now being the value we just loaded from addr.
2254 br(Assembler::EQ, retry);
2255 if (is_narrow && is_cae) {
2256 // For cmp-and-exchange and narrow oops, we need to restore
2257 // the compressed old-value. We moved it to 'expected' a few lines up.
2258 mov(result, expected);
2259 }
2260 bind(done);
2261 }
2262
2263 static bool different(Register a, RegisterOrConstant b, Register c) {
2264 if (b.is_constant())
2265 return a != c;
2266 else
2267 return a != b.as_register() && a != c && b.as_register() != c;
2268 }
2269
2270 #define ATOMIC_OP(NAME, LDXR, OP, IOP, AOP, STXR, sz) \
2271 void MacroAssembler::atomic_##NAME(Register prev, RegisterOrConstant incr, Register addr) { \
2272 if (UseLSE) { \
2273 prev = prev->is_valid() ? prev : zr; \
2274 if (incr.is_register()) { \
2275 AOP(sz, incr.as_register(), prev, addr); \
2276 } else { \
2277 mov(rscratch2, incr.as_constant()); \
|