130 " b.ne 2f\n\t"
131 " stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
132 " cbnz %w[tmp], 1b\n\t"
133 " b 3f\n\t"
134 "2:\n\t"
135 " dmb sy\n\t"
136 "3:\n\t"
137 : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
138 : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
139 : "memory");
140 return rv;
141 #else
142 // Warning: Arguments are swapped to avoid moving them for kernel call
143 return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
144 #endif
145 }
146
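// 64-bit compare-and-exchange. On AARCH64 this is an ldaxr/stlxr retry loop
// with a full barrier (dmb sy) on the failure path, so ordering is preserved
// even when the store-release is skipped. Elsewhere the operation is
// delegated to the runtime-selected os::atomic_cmpxchg_long_func helper.
// The 'order' parameter is ignored: the implementation is always conservative.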
template <>
inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
#ifdef AARCH64
  int64_t rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[rv], [%[dest]]\n\t"                 // load-acquire exclusive
    " cmp %[rv], %[cv]\n\t"
    " b.ne 2f\n\t"                                // no match: skip the store
    " stlxr %w[tmp], %[ev], [%[dest]]\n\t"        // store-release exclusive
    " cbnz %w[tmp], 1b\n\t"                       // lost the reservation: retry
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"                                 // full barrier on the failure path
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  assert(VM_Version::supports_cx8(), "64 bit atomic compare and exchange not supported on this architecture!");
  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
#endif
}

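// 64-bit atomic add, AARCH64 only. Returns the new (post-add) value; the
// ldaxr/stlxr pair gives acquire/release ordering around the update.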
#ifdef AARCH64
template <>
inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
  int64_t val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[val], [%[dest]]\n\t"                // load-acquire exclusive
    " add %[val], %[val], %[add_val]\n\t"
    " stlxr %w[tmp], %[val], [%[dest]]\n\t"       // store-release exclusive
    " cbnz %w[tmp], 1b\n\t"                       // lost the reservation: retry
    : [val] "=&r" (val), [tmp] "=&r" (tmp)
    : [add_val] "r" (add_value), [dest] "r" (dest)
    : "memory");
  return val;
}

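// 64-bit atomic exchange, AARCH64 only. Returns the value that was in *dest
// before the store; the loop is retried until the exclusive store succeeds.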
template <>
inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
  int64_t old_val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[old_val], [%[dest]]\n\t"            // load-acquire exclusive
    " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"   // store-release exclusive
    " cbnz %w[tmp], 1b\n\t"                       // lost the reservation: retry
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
}
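
// A minimal usage sketch (illustrative only, assuming an AARCH64 build):
//
//   volatile int64_t counter = 0;
//   Atomic::specialized_add<int64_t>(1, &counter);                  // counter == 1, returns 1
//   int64_t prev = Atomic::specialized_xchg<int64_t>(5, &counter);  // prev == 1, counter == 5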

#endif

#endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP