/*
 * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */
#endif

// As per atomic.hpp, all read-modify-write operations have to provide two-way
// barrier semantics. For AARCH64 we use load-acquire-with-reservation and
// store-release-with-reservation. While load-acquire combined with store-release
// does not generally form a two-way barrier, their use with reservations does -
// the ARMv8 architecture manual, Section F "Barrier Litmus Tests", indicates
// they provide sequentially consistent semantics. All we need to add is an
// explicit barrier in the failure path of the cmpxchg operations (as these do
// not execute the store) - arguably this may be overly cautious, as it is very
// unlikely that the hardware would pull loads/stores into the region guarded
// by the reservation.
//
// For ARMv7 we add explicit barriers in the stubs.

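// These size-keyed specializations are not called directly; the generic
// front-end in runtime/atomic.hpp selects them by operand size. Illustrative
// use only (see atomic.hpp for the authoritative front-end declarations):
//
//   volatile int32_t counter = 0;
//   Atomic::add(1, &counter);                        // -> PlatformAdd<4>
//   int32_t prev = Atomic::xchg(2, &counter);        // -> PlatformXchg<4>
//   int32_t seen = Atomic::cmpxchg(3, &counter, 2);  // -> PlatformCmpxchg<4>
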
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
};

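// 4-byte add-and-fetch. On AARCH64 this is a ldaxr/stlxr retry loop; on 32-bit
// ARM it delegates to the os::atomic_add_func stub (see the ARMv7 note above).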
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));
#ifdef AARCH64
  D val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[val], [%[dest]]\n\t"
    " add %w[val], %w[val], %w[add_val]\n\t"
    " stlxr %w[tmp], %w[val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [val] "=&r" (val), [tmp] "=&r" (tmp)
    : [add_val] "r" (add_value), [dest] "r" (dest)
    : "memory");
  return val;
#else
  return add_using_helper<int32_t>(os::atomic_add_func, add_value, dest);
#endif
}

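// The 8-byte add-and-fetch is provided only on AARCH64; there is no 64-bit add
// specialization for 32-bit ARM.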
#ifdef AARCH64
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));
  D val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[val], [%[dest]]\n\t"
    " add %[val], %[val], %[add_val]\n\t"
    " stlxr %w[tmp], %[val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [val] "=&r" (val), [tmp] "=&r" (tmp)
    : [add_val] "r" (add_value), [dest] "r" (dest)
    : "memory");
  return val;
}
#endif

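// 4-byte exchange, returning the previous value. AARCH64 uses a ldaxr/stlxr
// retry loop; 32-bit ARM delegates to the os::atomic_xchg_func stub.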
template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef AARCH64
  T old_val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[old_val], [%[dest]]\n\t"
    " stlxr %w[tmp], %w[new_val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
#else
  return xchg_using_helper<int32_t>(os::atomic_xchg_func, exchange_value, dest);
#endif
}

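// 8-byte exchange, AARCH64 only.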
#ifdef AARCH64
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T old_val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[old_val], [%[dest]]\n\t"
    " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
}
#endif // AARCH64

// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering

// No direct support for cmpxchg of bytes; emulate using int.
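// CmpxchgByteUsingInt (shared code in atomic.hpp) performs the byte cmpxchg by
// looping on a 4-byte cmpxchg of the aligned word containing the byte, leaving
// the other bytes of that word unchanged.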
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

#ifndef AARCH64

inline int32_t reorder_cmpxchg_func(int32_t exchange_value,
                                    int32_t volatile* dest,
                                    int32_t compare_value) {
  // Warning: Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
}

inline int64_t reorder_cmpxchg_long_func(int64_t exchange_value,
                                         int64_t volatile* dest,
                                         int64_t compare_value) {
  assert(VM_Version::supports_cx8(), "Atomic compare and exchange int64_t not supported on this architecture!");
  // Warning: Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
}

#endif // !AARCH64

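// 4-byte compare-and-exchange, returning the value observed at *dest. On
// AARCH64 the dmb on the failure path preserves two-way barrier semantics even
// though the store-release is not executed (see the note at the top of this
// file); on 32-bit ARM the operation goes through reorder_cmpxchg_func above.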
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef AARCH64
  T rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[rv], [%[dest]]\n\t"
    " cmp %w[rv], %w[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  return cmpxchg_using_helper<int32_t>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
#endif
}

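// 8-byte compare-and-exchange; same structure as the 4-byte version. The
// 32-bit ARM path additionally requires 64-bit cmpxchg support
// (VM_Version::supports_cx8(), asserted in reorder_cmpxchg_long_func above).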
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
#ifdef AARCH64
  T rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[rv], [%[dest]]\n\t"
    " cmp %[rv], %[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  return cmpxchg_using_helper<int64_t>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
#endif
}