src/hotspot/cpu/aarch64/gc/shenandoah/shenandoahBarrierSetAssembler_aarch64.cpp

rev 53513 : 8217016: Shenandoah: Streamline generation of CAS barriers

@@ -421,49 +421,18 @@
     }
   }
 }
 
 void ShenandoahBarrierSetAssembler::cmpxchg_oop(MacroAssembler* masm, Register addr, Register expected, Register new_val,
-                                                bool acquire, bool release, bool weak, bool encode,
-                                                Register tmp1, Register tmp2, Register tmp3,
+                                                bool acquire, bool release, bool weak, bool is_cae,
                                                 Register result) {
 
-  if (!ShenandoahCASBarrier) {
-    if (UseCompressedOops) {
-      if (encode) {
-        __ encode_heap_oop(tmp1, expected);
-        expected = tmp1;
-        __ encode_heap_oop(tmp3, new_val);
-        new_val = tmp3;
-      }
-      __ cmpxchg(addr, expected, new_val, Assembler::word, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
-      __ membar(__ AnyAny);
-    } else {
-      __ cmpxchg(addr, expected, new_val, Assembler::xword, /* acquire*/ true, /* release*/ true, /* weak*/ false, rscratch1);
-      __ membar(__ AnyAny);
-    }
-    return;
-  }
-
-  if (encode) {
-    storeval_barrier(masm, new_val, tmp3);
-  }
-
-  if (UseCompressedOops) {
-    if (encode) {
-      __ encode_heap_oop(tmp1, expected);
-      expected = tmp1;
-      __ encode_heap_oop(tmp2, new_val);
-      new_val = tmp2;
-    }
-  }
-  bool is_cae = (result != noreg);
+  Register tmp = rscratch2;
   bool is_narrow = UseCompressedOops;
   Assembler::operand_size size = is_narrow ? Assembler::word : Assembler::xword;
-  if (! is_cae) result = rscratch1;
 
-  assert_different_registers(addr, expected, new_val, result, tmp3);
+  assert_different_registers(addr, expected, new_val, result, tmp);
 
   Label retry, done, fail;
 
   // CAS, using LL/SC pair.
   __ bind(retry);

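Note on the signature change above: the `encode` flag and the three temp registers are gone. Callers are now responsible for applying the storeval barrier and for compressing `expected`/`new_val` when `UseCompressedOops` is on, and the routine claims `rscratch2` as its single scratch register, so none of the arguments may live there. `is_cae` is an explicit parameter rather than being inferred from `result != noreg`, which also means a real `result` register is always required. A hypothetical call site, with register choices that are illustrative assumptions rather than taken from this patch:

  // Illustrative fragment only; assumes a MacroAssembler* masm in scope
  // and already-encoded oops in r2/r4. Register picks are assumptions.
  ShenandoahBarrierSetAssembler* bs = (ShenandoahBarrierSetAssembler*)
      BarrierSet::barrier_set()->barrier_set_assembler();
  bs->cmpxchg_oop(masm, /*addr*/ r3, /*expected*/ r2, /*new_val*/ r4,
                  /*acquire*/ true, /*release*/ true, /*weak*/ false,
                  /*is_cae*/ false, /*result*/ r0); // r0 gets 0/1 success
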
@@ -472,39 +441,42 @@
     __ cmpw(result, expected);
   } else {
     __ cmp(result, expected);
   }
   __ br(Assembler::NE, fail);
-  __ store_exclusive(tmp3, new_val, addr, size, release);
+  __ store_exclusive(tmp, new_val, addr, size, release);
   if (weak) {
-    __ cmpw(tmp3, 0u); // If the store fails, return NE to our caller
+    __ cmpw(tmp, 0u); // If the store fails, return NE to our caller
   } else {
-    __ cbnzw(tmp3, retry);
+    __ cbnzw(tmp, retry);
   }
   __ b(done);
 
  __ bind(fail);
   // Check if rb(expected)==rb(result)
   // Shuffle registers so that we have memory value ready for next expected.
-  __ mov(tmp3, expected);
+  __ mov(tmp, expected);
   __ mov(expected, result);
   if (is_narrow) {
     __ decode_heap_oop(result, result);
-    __ decode_heap_oop(tmp3, tmp3);
+    __ decode_heap_oop(tmp, tmp);
   }
   read_barrier_impl(masm, result);
-  read_barrier_impl(masm, tmp3);
-  __ cmp(result, tmp3);
+  read_barrier_impl(masm, tmp);
+  __ cmp(result, tmp);
   // Retry with expected now being the value we just loaded from addr.
   __ br(Assembler::EQ, retry);
-  if (is_narrow && is_cae) {
+  if (is_cae && is_narrow) {
     // For cmp-and-exchange and narrow oops, we need to restore
     // the compressed old-value. We moved it to 'expected' a few lines up.
     __ mov(result, expected);
   }
   __ bind(done);
 
+  if (!is_cae) {
+    __ cset(result, Assembler::EQ);
+  }
 }
 
 #ifdef COMPILER1
 
 #undef __
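
For reference, the control flow the routine now emits, modeled as plain C++: under Shenandoah a CAS can fail spuriously because addr may hold a from-space copy of the very object the caller expects, so the failure path resolves both values through the read barrier and retries when they denote the same object; only a genuine mismatch falls through. A minimal sketch, assuming a placeholder resolve() in place of the real read barrier (this models the logic, it is not HotSpot code):

#include <atomic>
#include <cstdint>

// Placeholder for the Shenandoah read barrier: a real implementation
// maps a from-space copy to its to-space forwardee.
static uintptr_t resolve(uintptr_t v) { return v; }

// Models the !is_cae case: returns the 0/1 flag materialized by cset.
// 'expected' is updated in place, mirroring the register shuffle on the
// fail path above.
static bool cas_oop_model(std::atomic<uintptr_t>* addr,
                          uintptr_t& expected, uintptr_t new_val,
                          bool weak) {
  for (;;) {
    uintptr_t result = addr->load();              // load_exclusive
    if (result == expected) {
      // compare_exchange models the store_exclusive half of the LL/SC pair
      if (addr->compare_exchange_weak(result, new_val)) {
        return true;
      }
      if (weak) return false;  // weak CAS: report NE to the caller
      continue;                // strong CAS: retry the whole pair
    }
    if (resolve(result) != resolve(expected)) {
      return false;            // different objects: genuine failure
    }
    expected = result;         // same object, stale copy: false negative;
  }                            // retry with the freshly loaded value
}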