src/os_cpu/linux_arm/vm/atomic_linux_arm.hpp

rev 13266 : imported patch Atomic_refactoring
rev 13267 : [mq]: Atomic_polishing
rev 13268 : [mq]: Atomic_polishing_v2

@@ -145,11 +145,11 @@
 }
 
 template <>
 inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
 #ifdef AARCH64
-  jlong rv;
+  int64_t rv;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
     " ldaxr %[rv], [%[dest]]\n\t"
     " cmp %[rv], %[cv]\n\t"

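The hunk above cuts off before the retry tail of the asm body. For orientation, here is a minimal, self-contained sketch of the full AArch64 LL/SC compare-and-swap loop this excerpt comes from; the branch labels, the stlxr/cbnz tail, and the exact constraint list are assumptions based on the visible ldaxr/cmp prologue, and the real function's cmpxchg_memory_order parameter is omitted. The type change itself is behavior-neutral on AArch64: jlong is also 64 bits wide there, so %[rv] binds an X register either way; int64_t simply matches the specialization's int64_t signature after the Atomic_refactoring patch.

    #include <cstdint>

    // Sketch only: cmpxchg64_sketch is a hypothetical stand-in for
    // Atomic::specialized_cmpxchg<int64_t>, without the memory-order knob.
    inline int64_t cmpxchg64_sketch(int64_t exchange_value,
                                    volatile int64_t* dest,
                                    int64_t compare_value) {
      int64_t rv;   // 64-bit local, so %[rv] prints an X register
      int tmp;      // stlxr status flag; %w[tmp] forces a W register
      __asm__ volatile(
        "1:\n\t"
        " ldaxr %[rv], [%[dest]]\n\t"           // load-acquire exclusive
        " cmp %[rv], %[cv]\n\t"                 // compare with expected value
        " b.ne 2f\n\t"                          // mismatch: bail out
        " stlxr %w[tmp], %[ev], [%[dest]]\n\t"  // store-release exclusive
        " cbnz %w[tmp], 1b\n\t"                 // reservation lost: retry
        "2:"
        : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
        : [ev] "r" (exchange_value), [dest] "r" (dest),
          [cv] "r" (compare_value)
        : "memory");
      return rv;  // previous value at *dest, per cmpxchg convention
    }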
@@ -171,11 +171,11 @@
 }
 
 #ifdef AARCH64
 template <>
 inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
-  intptr_t val;
+  int64_t val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
     " ldaxr %[val], [%[dest]]\n\t"
     " add %[val], %[val], %[add_val]\n\t"

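As with cmpxchg, this hunk truncates before the retry tail. Below is a sketch of the complete fetch-and-add loop, with the stlxr/cbnz lines assumed from the same LL/SC pattern. The type change is again behavior-neutral: intptr_t is 64-bit on AArch64, so the old code already bound an X register; int64_t states the width explicitly and keeps the local consistent with the int64_t specialization.

    #include <cstdint>

    // Hypothetical stand-in for Atomic::specialized_add<int64_t>;
    // returns the updated value, matching Atomic::add semantics.
    inline int64_t add64_sketch(int64_t add_value, volatile int64_t* dest) {
      int64_t val;  // 64-bit local, so %[val] prints an X register
      int tmp;      // stlxr status flag; %w[tmp] forces a W register
      __asm__ volatile(
        "1:\n\t"
        " ldaxr %[val], [%[dest]]\n\t"           // load-acquire exclusive
        " add %[val], %[val], %[add_val]\n\t"    // compute new value
        " stlxr %w[tmp], %[val], [%[dest]]\n\t"  // store-release exclusive
        " cbnz %w[tmp], 1b"                      // reservation lost: retry
        : [val] "=&r" (val), [tmp] "=&r" (tmp)
        : [add_val] "r" (add_value), [dest] "r" (dest)
        : "memory");
      return val;
    }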
@@ -187,11 +187,11 @@
   return val;
 }
 
 template <>
 inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
-  intptr_t old_val;
+  int64_t old_val;
   int tmp;
   __asm__ volatile(
     "1:\n\t"
     " ldaxr %[old_val], [%[dest]]\n\t"
     " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"