 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP

#include "vm_version_aarch64.hpp"

// Implementation of class atomic

#define FULL_MEM_BARRIER  __sync_synchronize()
#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
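
// Note on the barrier macros: __sync_synchronize() is a full two-way fence
// (on AArch64, GCC typically emits "dmb ish"), while the READ/WRITE variants
// map to acquire and release fences via the __atomic builtins. The trailing
// semicolons mean the READ/WRITE macro expansions already supply the
// terminating ';' at their use sites.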

inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }

inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }
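
// These stores compile to plain str instructions. That is sufficient here
// because naturally aligned loads and stores of up to 64 bits are
// single-copy atomic on AArch64; any ordering beyond that is the caller's
// responsibility (e.g. via the barrier macros above).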

inline jint Atomic::add(jint add_value, volatile jint* dest)
{
  return __sync_add_and_fetch(dest, add_value);
}
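
// __sync_add_and_fetch is a full-barrier atomic read-modify-write that
// returns the updated value, matching Atomic::add's contract. For example,
// Atomic::add(1, &counter) atomically increments counter and returns the
// incremented value.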

inline void Atomic::inc(volatile jint* dest)
{
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest)
{
  add_ptr(1, dest);
}

inline void Atomic::dec (volatile jint* dest)
{
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest)
{
  add_ptr(-1, dest);
}
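
// inc/dec and their _ptr variants are thin wrappers over add/add_ptr, so
// they inherit the same full-barrier semantics; the returned value is
// simply discarded.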

inline jint Atomic::xchg (jint exchange_value, volatile jint* dest)
{
  jint res = __sync_lock_test_and_set (dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}
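
// __sync_lock_test_and_set is documented by GCC as an acquire barrier only,
// not a full barrier, so an explicit FULL_MEM_BARRIER follows the exchange
// to give xchg the conservative two-way fencing HotSpot expects of it.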

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest)
{
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
                                        T compare_value, cmpxchg_memory_order order)
{
  if (order == memory_order_relaxed) {
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;
  } else {
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  }
}
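
// generic_cmpxchg returns the value observed at *dest, which equals
// compare_value exactly when the swap succeeded. The relaxed path uses the
// strong (non-spuriously-failing) form of __atomic_compare_exchange with
// relaxed ordering; any other ordering falls back to the conservative,
// full-barrier __sync builtin. An illustrative caller pattern (a sketch,
// not part of this file, assuming the shared Atomic::cmpxchg front end
// with its default conservative ordering) is the usual CAS loop:
//
//   jint old;
//   do {
//     old = *addr;
//   } while (Atomic::cmpxchg(old + 1, addr, old) != old);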

#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
inline jbyte Atomic::cmpxchg (jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order)
{
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}
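
// Defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE tells the shared runtime that
// this platform provides a native one-byte cmpxchg, so it does not need to
// emulate byte-wide CAS on top of a word-sized compare-and-swap.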

inline jint Atomic::cmpxchg (jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order)
{
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}

inline void Atomic::store (jlong store_value, jlong* dest) { *dest = store_value; }
inline void Atomic::store (jlong store_value, volatile jlong* dest) { *dest = store_value; }
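
// Unlike 32-bit ARM, AArch64 loads and stores an aligned 64-bit value in a
// single instruction, so plain accesses suffice for jlong stores here (and
// for Atomic::load further down); no special instruction sequence is needed.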

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest)
{
  return __sync_add_and_fetch(dest, add_value);
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest)
{
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest)
{
  add_ptr(1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest)
{
  add_ptr(-1, dest);
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest)
{
  intptr_t res = __sync_lock_test_and_set (dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}

inline jlong Atomic::cmpxchg (jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order)
{
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}

inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order)
{
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order)
{
  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value,
                              order);
}

inline jlong Atomic::load(const volatile jlong* src) { return *src; }

#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP
#define OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP

#include "vm_version_aarch64.hpp"

// Implementation of class atomic

#define FULL_MEM_BARRIER  __sync_synchronize()
#define READ_MEM_BARRIER  __atomic_thread_fence(__ATOMIC_ACQUIRE);
#define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
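
// This appears to be a later revision of the same file: the overload-based
// Atomic methods of the version above are replaced by explicit
// specializations of Atomic::specialized_* templates, which the shared
// Atomic front end presumably selects by operand width, so only the
// width-specific primitives need to live in this platform header.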

template <>
inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
  return __sync_add_and_fetch(dest, add_value);
}

template <>
inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
  return __sync_add_and_fetch(dest, add_value);
}

template <>
inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
  int32_t res = __sync_lock_test_and_set (dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}

template <>
inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
  int64_t res = __sync_lock_test_and_set (dest, exchange_value);
  FULL_MEM_BARRIER;
  return res;
}
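
// As in the earlier revision above: __sync_lock_test_and_set is only an
// acquire barrier, so the explicit FULL_MEM_BARRIER restores the full
// two-way fencing expected of xchg.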

template <typename T> T generic_cmpxchg(T exchange_value, volatile T* dest,
                                        T compare_value, cmpxchg_memory_order order)
{
  if (order == memory_order_relaxed) {
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;
  } else {
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  }
}

#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE
template <>
inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}

template <>
inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}

template <>
inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  return generic_cmpxchg(exchange_value, dest, compare_value, order);
}

#endif // OS_CPU_LINUX_AARCH64_VM_ATOMIC_LINUX_AARCH64_HPP