 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/integerTypes.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isRegisteredEnum.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/removeCV.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value that does not collide with the C++11 memory_order
  // enumerators; our default ordering needs to be more conservative.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
 public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function; see that file for details.

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  inline static void dec    (volatile jint*     dest);
  inline static void dec    (volatile jshort*   dest);
  inline static void dec    (volatile size_t*   dest);
  inline static void dec_ptr(volatile intptr_t* dest);
  inline static void dec_ptr(volatile void*     dest);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns the
  // prior value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename T, typename D, typename U>
  inline static D cmpxchg(T exchange_value,
                          D volatile* dest,
                          U compare_value,
                          cmpxchg_memory_order order = memory_order_conservative);
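
  // Example (illustrative only; the field and function names below are
  // hypothetical, not part of this interface): advance a versioned state
  // word only if no other thread has changed it since it was read.
  //
  //   volatile jint _state;
  //   bool try_advance(jint observed) {
  //     return Atomic::cmpxchg(observed + 1, &_state, observed) == observed;
  //   }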

  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded. Returns true if
  // the comparison succeeded and the exchange occurred. This is
  // often used as part of lazy initialization, as a lock-free
  // alternative to the Double-Checked Locking Pattern.
  template<typename T, typename D>
  inline static bool replace_if_null(T* value, D* volatile* dest,
                                     cmpxchg_memory_order order = memory_order_conservative);
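
  // Example (illustrative only; Foo and _instance are hypothetical):
  // lock-free lazy initialization of a shared object.
  //
  //   static Foo* volatile _instance = NULL;
  //   Foo* instance() {
  //     Foo* result = _instance;
  //     if (result == NULL) {
  //       result = new Foo();
  //       if (!Atomic::replace_if_null(result, &_instance)) {
  //         delete result;        // another thread won the race
  //         result = _instance;
  //       }
  //     }
  //     return result;
  //   }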

  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
                                     volatile intptr_t* dest,
                                     intptr_t compare_value,
                                     cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value, dest, compare_value, order);
  }

  inline static void* cmpxchg_ptr(void* exchange_value,
                                  volatile void* dest,
                                  void* compare_value,
                                  cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value,
                   reinterpret_cast<void* volatile*>(dest),
                   compare_value,
                   order);
  }

 private:
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

  // Dispatch handler for cmpxchg.  Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename T, typename D, typename U, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg.  Support for sizes
  // of 1, 4, and 8 is required.  The class is a function object that
  // must be default constructible, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type cmpxchg_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, T, cmpxchg_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back.  Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T cmpxchg_using_helper(Fn fn,
                                T exchange_value,
                                T volatile* dest,
                                T compare_value);

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access.  To use, derive PlatformCmpxchg<1> from
  // this class.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
private:
};

template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};
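
// Example (illustrative only; Base and Derived are hypothetical): the
// trait reports ordinary pointer conversions, including derived-to-base.
//
//   class Base {};
//   class Derived : public Base {};
//   STATIC_ASSERT( Atomic::IsPointerConvertible<Derived*, Base*>::value);
//   STATIC_ASSERT(!Atomic::IsPointerConvertible<Base*, Derived*>::value);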

// Define the class before including platform file, which may specialize
// the operator definition.  No generic definition of the operator
// template is provided, nor are there any generic specializations of
// the class.  The platform file is responsible for providing those.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};
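
// A minimal sketch of what a platform file might provide for the 4-byte
// case, assuming a gcc-style toolchain; a real platform file would also
// honor the order argument rather than always using the strongest form.
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
//                                                   T volatile* dest,
//                                                   T compare_value,
//                                                   cmpxchg_memory_order order) const {
//     STATIC_ASSERT(4 == sizeof(T));
//     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
//   }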

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete.  The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};
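
// A platform without byte-sized compare-and-swap opts in from its
// platform file with a specialization along these lines:
//
//   template<>
//   struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};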

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
}

inline void Atomic::inc(volatile size_t* dest) {
  inc_ptr((volatile intptr_t*) dest);
}

inline void Atomic::dec(volatile size_t* dest) {
  dec_ptr((volatile intptr_t*) dest);
}

template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         cmpxchg_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}

template<typename T, typename D>
inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
                                    cmpxchg_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(value, dest, expected_null, order);
}

// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
                                        dest,
                                        compare_value,
                                        order);
  }
};

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D, typename U>
struct Atomic::CmpxchgImpl<
  T*, D*, U*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
                cmpxchg_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
    // but we need to match D* when calling platform support.
    D* old_value = const_cast<D*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
  }
};
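
// Example (illustrative only; the types and _head are hypothetical):
// installing a more derived object into a base-typed slot; the result
// has the slot's pointer type.
//
//   class Node {};
//   class MarkedNode : public Node {};
//   Node* volatile _head = NULL;
//   MarkedNode* marked = new MarkedNode();
//   Node* prior = Atomic::cmpxchg(marked, &_head, (Node*)NULL);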

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IntegerTypes::Translate<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
    typedef IntegerTypes::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(Translator::decay(exchange_value),
              reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              order));
  }
};
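
// A sketch (the type and its translator are hypothetical) of how a
// single-word class type could participate: its Translate specialization
// names a Decayed integer type of the same size and converts both ways.
//
//   class Ticket {
//     uintptr_t _bits;
//    public:
//     explicit Ticket(uintptr_t bits) : _bits(bits) {}
//     uintptr_t bits() const { return _bits; }
//   };
//
//   template<>
//   struct IntegerTypes::Translate<Ticket> : public TrueType {
//     typedef Ticket Value;
//     typedef uintptr_t Decayed;
//     static Decayed decay(Value x) { return x.bits(); }
//     static Value recover(Decayed x) { return Ticket(x); }
//   };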

template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T exchange_value,
                                      T volatile* dest,
                                      T compare_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return IntegerTypes::cast<T>(
    fn(IntegerTypes::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       IntegerTypes::cast<Type>(compare_value)));
}
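
// A sketch of the intended use, assuming a platform whose 4-byte cmpxchg
// lives in an out-of-line stub (the helper name is hypothetical):
//
//   extern "C" jint _Atomic_cmpxchg(jint, volatile jint*, jint);
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
//                                                   T volatile* dest,
//                                                   T compare_value,
//                                                   cmpxchg_memory_order order) const {
//     STATIC_ASSERT(4 == sizeof(T));
//     return cmpxchg_using_helper<jint>(_Atomic_cmpxchg,
//                                       exchange_value, dest, compare_value);
//   }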

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);
  uint32_t cur = *aligned_dest;
  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  cur_as_bytes[offset] = canon_compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    uint32_t new_value = cur;
    // ... except for the one byte we want to update
    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;

    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte is still as cur we loop and try again
  } while (cur_as_bytes[offset] == canon_compare_value);

  return IntegerTypes::cast<T>(cur_as_bytes[offset]);
}

inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
}

inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
  // Most platforms do not support atomic add on a 2-byte value. However,
  // if the value occupies the most significant 16 bits of an aligned 32-bit
  // word, then we can do this with an atomic add of (add_value << 16)
  // to the 32-bit word.
  //
  // The least significant parts of this 32-bit word will never be affected, even
  // in case of overflow/underflow.
  //
  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
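  //
  // A sketch of such a declaration (field names are hypothetical; compare
  // Symbol's _refcount/_length pair):
  //
  //   ATOMIC_SHORT_PAIR(
  //     volatile jshort _counter,   // updated via Atomic::add(jshort, ...)
  //     jshort          _filler     // pads the other half of the 32-bit word
  //   );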
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
#endif
  return (jshort)(new_value >> 16); // preserves sign
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP