 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/integerTypes.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"
enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value that does not overlap with the C++11 std::memory_order
  // enumerators; by default we are more conservative than C++11 requires.
  memory_order_conservative = 8
};
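
// For illustration only (a sketch, not part of this header): call sites get
// memory_order_conservative by default, and may pass memory_order_relaxed
// explicitly when weaker ordering suffices, e.g. (with a hypothetical
// volatile jint field _counter):
//
//   jint prev = Atomic::cmpxchg(new_value, &_counter, expected,
//                               memory_order_relaxed);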

class Atomic : AllStatic {
  template<typename T> class Never: public FalseType {};
  typedef char* CanonicalPointer;

  // The canonical type is CanonicalPointer for pointer types, otherwise the
  // canonical (signed) integer type of the same size.
  template<typename T>
  struct CanonicalType : AllStatic {
    typedef typename Conditional<IsPointer<T>::value, CanonicalPointer, typename IntegerTypes::Signed<T>::type>::type type;
  };

  template<typename T>
  static typename EnableIf<IsPointer<T>::value, CanonicalPointer>::type
  cast_to_canonical(T value) { return reinterpret_cast<CanonicalPointer>(value); }

  template<typename T>
  static typename EnableIf<!IsPointer<T>::value, typename IntegerTypes::Signed<T>::type>::type
  cast_to_canonical(T value) { return IntegerTypes::cast_to_signed(value); }

  template<typename T, typename U>
  static typename EnableIf<IsPointer<U>::value, T>::type cast_from_canonical(U value) {
    return reinterpret_cast<T>(value);
  }

  template<typename T, typename U>
  static typename EnableIf<!IsPointer<U>::value, T>::type cast_from_canonical(U value) {
    return IntegerTypes::cast<T>(value);
  }
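
  // Illustrative round trip (a sketch, not used by the code below): a pointer
  // maps to CanonicalPointer (char*), an integral type to the signed integer
  // of the same size, and cast_from_canonical inverts either mapping:
  //
  //   int* p = ...;
  //   CanonicalPointer c = cast_to_canonical(p);  // reinterpret_cast to char*
  //   int* q = cast_from_canonical<int*>(c);      // back to int*; q == p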

  template <typename T>
  inline static void specialized_store(T store_value, volatile T* dest) {
    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
    (void)const_cast<T&>(*dest = store_value);
  }

  template <typename T>
  inline static T specialized_load(const volatile T* dest) {
    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
    return *dest;
  }

  template <typename T>
  inline static T specialized_add(T add_value, volatile T* dest) {
    STATIC_ASSERT(Never<T>::value);
    return add_value;
  }

  template <typename T>
  inline static void specialized_inc(volatile T* dest) {
    add(1, dest);
  }

  template <typename T>
  inline static void specialized_dec(volatile T* dest) {
    add(-1, dest);
  }

  // If the platform does not offer a specialization for pointers, fall back
  // to the canonical integer type of pointer width instead.
  template <typename T>
  inline static typename EnableIf<IsPointer<T>::value, T>::type specialized_xchg(T exchange_value, volatile T* dest) {
    typedef typename IntegerTypes::Signed<T>::type Raw;
    Raw result = specialized_xchg(IntegerTypes::cast_to_signed(exchange_value), reinterpret_cast<volatile Raw*>(dest));
    return IntegerTypes::cast<T>(result);
  }
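
  // For example (a sketch of the instantiation): with T = int*, Raw is
  // intptr_t, so the pointer overload forwards to the integer overload of
  // pointer width, which a platform header is expected to specialize:
  //
  //   specialized_xchg((int*)v, (int* volatile*)d)
  //     ==> IntegerTypes::cast<int*>(
  //           specialized_xchg(IntegerTypes::cast_to_signed((int*)v),
  //                            reinterpret_cast<volatile intptr_t*>(d)));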

  template <typename T>
  inline static typename EnableIf<!IsPointer<T>::value, T>::type specialized_xchg(T exchange_value, volatile T* dest) {
    STATIC_ASSERT(Never<T>::value);
    return exchange_value;
  }

  template <typename T>
  inline static T specialized_cmpxchg(T exchange_value, volatile T* dest, T compare_value, cmpxchg_memory_order order) {
    STATIC_ASSERT(Never<T>::value);
    return exchange_value;
  }

 public:
  // Atomic operations on 64-bit types are not available on all 32-bit
  // platforms.  If atomic ops on 64-bit types are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.
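
  // A typical guarded use on a 32-bit platform (a sketch; supports_cx8()
  // comes from VM_Version, and the lock-based fallback here is hypothetical):
  //
  //   if (VM_Version::supports_cx8()) {
  //     Atomic::cmpxchg(new_val, &_some_jlong_field, old_val);
  //   } else {
  //     MutexLockerEx ml(Some_lock);  // hypothetical fallback lock
  //     if (_some_jlong_field == old_val) _some_jlong_field = new_val;
  //   }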

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function.

  // ... (the remaining member declarations and the end of class Atomic are
  // elided here) ...

template <typename T>
inline void Atomic::dec(volatile T* src) {
  STATIC_ASSERT(IsIntegral<T>::value);
  typedef typename IntegerTypes::Signed<T>::type Raw;
  specialized_dec(reinterpret_cast<volatile Raw*>(src));
}

template <typename T>
inline void Atomic::dec(T* volatile* src) {
  // Decrementing a T* by one element subtracts sizeof(T) bytes; only for
  // sizeof(T) == 1 is that the same as a raw decrement by one, so larger
  // element types route through add(), which scales like pointer arithmetic.
  if (sizeof(T) != 1) {
    add(-1, src);
  } else {
    typedef typename IntegerTypes::Signed<T*>::type Raw;
    specialized_dec(reinterpret_cast<volatile Raw*>(src));
  }
}

template <typename T, typename U>
inline U Atomic::xchg(T exchange_value, volatile U* dest) {
  typedef typename CanonicalType<U>::type Raw;
  U exchange_value_cast = exchange_value;
  Raw result = specialized_xchg(cast_to_canonical(exchange_value_cast),
                                reinterpret_cast<volatile Raw*>(dest));
  return cast_from_canonical<U>(result);
}
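
// Usage (a sketch with hypothetical fields _head and _flag): xchg works
// uniformly for integers and pointers, returning the previous value of *dest:
//
//   Node* old_head = Atomic::xchg(node, &_head);     // pointer exchange
//   jint  old_flag = Atomic::xchg((jint)1, &_flag);  // integer exchange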

template <typename T, typename U, typename V>
inline U Atomic::cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order) {
  typedef typename IntegerTypes::Signed<U>::type Raw;
  U exchange_value_cast = exchange_value;
  U compare_value_cast = compare_value;
  Raw result = specialized_cmpxchg(IntegerTypes::cast_to_signed(exchange_value_cast),
                                   reinterpret_cast<volatile Raw*>(dest),
                                   IntegerTypes::cast_to_signed(compare_value_cast), order);
  return IntegerTypes::cast<U>(result);
}
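
// A typical compare-and-swap retry loop built on this primitive (a sketch;
// _state is a hypothetical volatile jint field):
//
//   jint old_state = _state;
//   for (;;) {
//     jint new_state = old_state | SOME_BIT;  // hypothetical update
//     jint prev = Atomic::cmpxchg(new_state, &_state, old_state);
//     if (prev == old_state) break;           // CAS succeeded
//     old_state = prev;                       // lost the race; retry
//   }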

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE