 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/integerTypes.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isRegisteredEnum.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/removeCV.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value that does not coincide with any C++11 std::memory_order
  // enumerator; we need to be more conservative than C++11 provides.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
 public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms.  If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.
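  //
  // For example, a caller might guard a jlong cmpxchg roughly like this
  // (a sketch only; "_counter" and the fallback lock are illustrative,
  // while VM_Version::supports_cx8() is the availability query used in
  // HotSpot):
  //
  //   if (VM_Version::supports_cx8()) {
  //     Atomic::cmpxchg(new_value, &_counter, old_value);
  //   } else {
  //     MutexLocker ml(Counter_lock);   // hypothetical fallback lock
  //     if (_counter == old_value) _counter = new_value;
  //   }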

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function.

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  inline static void dec    (volatile jint*     dest);
  inline static void dec    (volatile jshort*   dest);
  inline static void dec    (volatile size_t*   dest);
  inline static void dec_ptr(volatile intptr_t* dest);
  inline static void dec_ptr(volatile void*     dest);

  // Performs atomic exchange of *dest with exchange_value.  Returns the
  // prior value of *dest.  xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded.  Returns prior
  // value of *dest.  cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>

  template<typename T, typename D, typename U>
  inline static D cmpxchg(T exchange_value,
                          D volatile* dest,
                          U compare_value,
                          cmpxchg_memory_order order = memory_order_conservative);
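
  // A typical caller wraps cmpxchg in a retry loop to perform an arbitrary
  // read-modify-write.  A minimal sketch, assuming a "volatile jint _bits"
  // field and an illustrative flag constant:
  //
  //   jint old_bits = _bits;
  //   for (;;) {
  //     jint new_bits = old_bits | SOME_FLAG;
  //     jint witness = Atomic::cmpxchg(new_bits, &_bits, old_bits);
  //     if (witness == old_bits) break;  // success; new_bits is installed
  //     old_bits = witness;              // lost a race; retry with fresh value
  //   }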

  template<typename T, typename D>
  inline static bool conditional_store_ptr(T* value, D* volatile* dest,
                                           cmpxchg_memory_order order = memory_order_conservative);
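
  // conditional_store_ptr() stores value only if *dest is NULL, returning
  // whether the store happened.  A sketch, with an illustrative field:
  //
  //   Node* volatile _head;
  //   ...
  //   if (Atomic::conditional_store_ptr(node, &_head)) {
  //     // we won the race; _head now points to node
  //   }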

  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
                                     volatile intptr_t* dest,
                                     intptr_t compare_value,
                                     cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value, dest, compare_value, order);
  }

  inline static void* cmpxchg_ptr(void* exchange_value,
                                  volatile void* dest,
                                  void* compare_value,
                                  cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value,
                   reinterpret_cast<void* volatile*>(dest),
                   compare_value,
                   order);
  }

 private:
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

  // Dispatch handler for cmpxchg.  Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename T, typename D, typename U, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg.  Support for sizes
  // of 1, 4, and 8 is required.  The class is a function object that
  // must be default constructible, with these requirements:
  //
  // - dest is of type D*.
  // - exchange_value and compare_value are of type D.
  // - order is of type cmpxchg_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(D)>.
  //
  // Then
  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
  // must be a valid expression, returning a result convertible to D.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, T, cmpxchg_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;
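
  // For example, a port whose hardware provides a 4-byte compare-and-swap
  // might implement the size-4 case like this (a sketch only; the
  // "platform_cas32" primitive is hypothetical):
  //
  //   template<>
  //   template<typename T>
  //   inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
  //                                                   T volatile* dest,
  //                                                   T compare_value,
  //                                                   cmpxchg_memory_order order) const {
  //     STATIC_ASSERT(4 == sizeof(T));
  //     return platform_cas32(exchange_value, dest, compare_value, order);
  //   }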

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back.
  template<typename StubType, typename StubFn, typename T>
  static T cmpxchg_using_stub(StubFn stub_fn,
                              T exchange_value,
                              T volatile* dest,
                              T compare_value);
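
  // For example, a size-8 implementation backed by an out-of-line assembly
  // helper might look like this (a sketch only; "_cmpxchg_long_stub" is a
  // hypothetical stub with a jlong-based signature):
  //
  //   template<>
  //   template<typename T>
  //   inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
  //                                                   T volatile* dest,
  //                                                   T compare_value,
  //                                                   cmpxchg_memory_order order) const {
  //     STATIC_ASSERT(8 == sizeof(T));
  //     return cmpxchg_using_stub<jlong>(_cmpxchg_long_stub,
  //                                      exchange_value, dest, compare_value);
  //   }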

  // Support platforms that do not provide RMW byte-level atomic access.
  // To use, derive PlatformCmpxchg<1> from this class.
  // Can't be private: C++03 11.4/2; fixed in C++11.
 public:
  struct CmpxchgByteUsingInt;
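  //
  // A platform lacking byte-sized compare-and-swap would then write
  // (sketch):
  //
  //   template<>
  //   struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};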
 private:
};

template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Use the "sizeof trick" to test for convertibility.
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};
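
// For example (illustrative only, assuming Derived publicly derives
// from Base):
//   IsPointerConvertible<Derived*, Base*>::value  // true
//   IsPointerConvertible<Base*, Derived*>::value  // false
//   IsPointerConvertible<Derived*, void*>::value  // true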

// Define class before including platform file, which may specialize
// the operator definition.  No generic definition of specializations
// of the operator template is provided, nor are there any generic
// specializations of the class.  That all needs to be provided by the
// platform file.
template<size_t byte_size>
struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};

// Define class before including platform file, which may use this as
// a base class, requiring it be complete.  The operator template
// definition is defined later.
struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
}

inline void Atomic::inc(volatile size_t* dest) {
  inc_ptr((volatile intptr_t*) dest);
}

inline void Atomic::dec(volatile size_t* dest) {
  dec_ptr((volatile intptr_t*) dest);
}

template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         cmpxchg_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}

template<typename T, typename D>
inline bool Atomic::conditional_store_ptr(T* value, D* volatile* dest,
                                          cmpxchg_memory_order order) {
  D* expected_null = NULL;
  return expected_null == cmpxchg(value, dest, expected_null, order);
}

// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(exchange_value,
                                        dest,
                                        compare_value,
                                        order);
  }
};

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
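//
// For example (a sketch, assuming Derived publicly derives from Base):
//
//   Base* volatile _obj;
//   Derived* d = new_derived();   // hypothetical source of a Derived*
//   Base* witness = Atomic::cmpxchg(d, &_obj, (Base*)NULL);
//
// Here T is Derived, D and U are Base, and the result type is Base*.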
template<typename T, typename D, typename U>
struct Atomic::CmpxchgImpl<
  T*, D*, U*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
                cmpxchg_memory_order order) const {
    // Allow derived-to-base conversion and adding cv-qualifiers.
    D* new_value = exchange_value;
    // We don't care what the cv-qualifiers of compare_value are,
    // but we need to match D* when calling platform support.
    typedef typename RemoveCV<U>::type U_nocv;
    D* old_value = const_cast<U_nocv*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
  }
};

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IntegerTypes::Translate<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
    typedef IntegerTypes::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(Translator::decay(exchange_value),
              reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              order));
  }
};
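
// A translator is supplied by specializing IntegerTypes::Translate.  A
// sketch for a hypothetical pointer-sized value class "Ticket", using only
// the members the dispatch above relies on (value, Decayed, decay, recover):
//
//   template<>
//   struct IntegerTypes::Translate<Ticket> {
//     static const bool value = true;
//     typedef uintptr_t Decayed;
//     static Decayed decay(Ticket t)   { return t.raw(); }
//     static Ticket recover(Decayed x) { return Ticket(x); }
//   };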

template<typename StubType, typename StubFn, typename T>
inline T Atomic::cmpxchg_using_stub(StubFn stub_fn,
                                    T exchange_value,
                                    T volatile* dest,
                                    T compare_value) {
  STATIC_ASSERT(sizeof(StubType) == sizeof(T));
  return IntegerTypes::cast<T>(
    stub_fn(IntegerTypes::cast<StubType>(exchange_value),
            reinterpret_cast<StubType volatile*>(dest),
            IntegerTypes::cast<StubType>(compare_value)));
}

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);
  uint32_t cur = *aligned_dest;
  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);

  // The current value may not be what we are looking for, so force it
  // to the compare_value so the initial cmpxchg will fail if it differs.
  cur_as_bytes[offset] = canon_compare_value;

  // Always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure.
  do {
    // Value to swap in matches current value ...
    uint32_t new_value = cur;
    // ... except for the one byte we want to update.
    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;

    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
    if (res == cur) break;      // success

    // At least one byte in the int changed value, so update
    // our view of the current int.
    cur = res;
    // If our byte still holds the compare_value, loop and try again.
  } while (cur_as_bytes[offset] == canon_compare_value);

  return IntegerTypes::cast<T>(cur_as_bytes[offset]);
}

inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
}

inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
  // Most platforms do not support atomic add on a 2-byte value. However,
  // if the value occupies the most significant 16 bits of an aligned 32-bit
  // word, then we can do this with an atomic add of (add_value << 16)
  // to the 32-bit word.
  //
  // The least significant parts of this 32-bit word will never be affected, even
  // in case of overflow/underflow.
  //
  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
#endif
  return (jshort)(new_value >> 16); // preserves sign
}
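
// The alignment required above is obtained by declaring the jshort with the
// ATOMIC_SHORT_PAIR macro from macros.hpp, which orders the pair by
// endianness.  A sketch with illustrative member names:
//
//   ATOMIC_SHORT_PAIR(
//     volatile jshort _length,   // the jshort updated via Atomic::add
//     jshort          _pad       // companion jshort, never updated atomically
//   );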

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP