 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value that does not interfere with the C++11 memory orderings.
  // We need to be more conservative.
  memory_order_conservative = 8
};
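
// Illustration only (not part of this interface): the conservative default is
// appropriate for almost all callers; memory_order_relaxed is a reasonable
// choice only where ordering does not matter, e.g. a statistics counter:
//
//   Atomic::cmpxchg(new_count, &_stat_count, old_count, memory_order_relaxed);
//
// (_stat_count, new_count and old_count are hypothetical names.)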

class Atomic : AllStatic {
 public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.
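
  // Illustrative sketch only (none of the names below are part of this
  // interface): code needing a 64-bit atomic on a platform that may lack one
  // typically guards the fast path with VM_Version::supports_cx8() and falls
  // back to a lock otherwise:
  //
  //   if (VM_Version::supports_cx8()) {
  //     Atomic::cmpxchg(new_val, &_long_field, old_val);
  //   } else {
  //     MutexLockerEx ml(SomeField_lock);   // hypothetical lock
  //     if (_long_field == old_val) _long_field = new_val;
  //   }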

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.
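
  // For example (illustration only; field names are hypothetical), the
  // two-way barrier means neither the preceding nor the following access
  // can be reordered across the atomic operation:
  //
  //   _data = 42;                  // cannot be reordered below the inc
  //   Atomic::inc(&_ready_count);  // read-modify-write, full barrier
  //   jint snoop = _other_field;   // cannot be reordered above the inc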

  // Atomically store to a location
  inline static void store(jbyte store_value, jbyte* dest);
  inline static void store(jshort store_value, jshort* dest);
  inline static void store(jint store_value, jint* dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store(jlong store_value, jlong* dest);
  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
  inline static void store_ptr(void* store_value, void* dest);

  inline static void store(jbyte store_value, volatile jbyte* dest);
  inline static void store(jshort store_value, volatile jshort* dest);
  inline static void store(jint store_value, volatile jint* dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store(jlong store_value, volatile jlong* dest);
  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  inline static void store_ptr(void* store_value, volatile void* dest);

  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong load(const volatile jlong* src);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  inline static jshort add(jshort add_value, volatile jshort* dest);
  inline static jint add(jint add_value, volatile jint* dest);
  inline static size_t add(size_t add_value, volatile size_t* dest);
  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
  inline static void* add_ptr(intptr_t add_value, volatile void* dest);

  // Atomically increment location. inc*() provide:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  inline static void inc(volatile jint* dest);
  inline static void inc(volatile jshort* dest);
  inline static void inc(volatile size_t* dest);
  inline static void inc_ptr(volatile intptr_t* dest);
  inline static void inc_ptr(volatile void* dest);

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  inline static void dec(volatile jint* dest);
  inline static void dec(volatile jshort* dest);
  inline static void dec(volatile size_t* dest);
  inline static void dec_ptr(volatile intptr_t* dest);
  inline static void dec_ptr(volatile void* dest);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  inline static jint xchg(jint exchange_value, volatile jint* dest);
  inline static unsigned int xchg(unsigned int exchange_value, volatile unsigned int* dest);
  inline static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest);
  inline static void* xchg_ptr(void* exchange_value, volatile void* dest);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
  inline static jbyte cmpxchg(jbyte exchange_value, volatile jbyte* dest, jbyte compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static jint cmpxchg(jint exchange_value, volatile jint* dest, jint compare_value, cmpxchg_memory_order order = memory_order_conservative);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong cmpxchg(jlong exchange_value, volatile jlong* dest, jlong compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static unsigned int cmpxchg(unsigned int exchange_value, volatile unsigned int* dest, unsigned int compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest, intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative);
  inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value, cmpxchg_memory_order order = memory_order_conservative);
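
  // Typical usage sketch (illustration only): a lock-free update computes a
  // new value from an observed old value and retries until no other thread
  // has intervened:
  //
  //   jint old_val = *dest;
  //   while (true) {
  //     jint new_val = compute(old_val);           // hypothetical helper
  //     jint prev = Atomic::cmpxchg(new_val, dest, old_val);
  //     if (prev == old_val) break;                // success
  //     old_val = prev;                            // lost the race; retry
  //   }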
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
}

inline void Atomic::inc(volatile size_t* dest) {
  inc_ptr((volatile intptr_t*) dest);
}

inline void Atomic::dec(volatile size_t* dest) {
  dec_ptr((volatile intptr_t*) dest);
}

#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
/*
 * This is the default implementation of byte-sized cmpxchg. It emulates a jbyte-sized
 * cmpxchg in terms of a jint-sized cmpxchg. Platforms may override this by defining
 * their own inline definition as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE.
 * This will cause the platform-specific implementation to be used instead.
 */
inline jbyte Atomic::cmpxchg(jbyte exchange_value, volatile jbyte* dest,
                             jbyte compare_value, cmpxchg_memory_order order) {
  STATIC_ASSERT(sizeof(jbyte) == 1);
  volatile jint* dest_int =
      reinterpret_cast<volatile jint*>(align_down(dest, sizeof(jint)));
  size_t offset = pointer_delta(dest, dest_int, 1);
  jint cur = *dest_int;
  jbyte* cur_as_bytes = reinterpret_cast<jbyte*>(&cur);

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  cur_as_bytes[offset] = compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    jint new_value = cur;
    // ... except for the one jbyte we want to update
    reinterpret_cast<jbyte*>(&new_value)[offset] = exchange_value;

    jint res = cmpxchg(new_value, dest_int, cur, order);
    if (res == cur) break;      // success

    // at least one jbyte in the jint changed value, so update
    // our view of the current jint
    cur = res;
    // if our jbyte is still as cur we loop and try again
  } while (cur_as_bytes[offset] == compare_value);

  return cur_as_bytes[offset];
}
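
// Worked example (illustrative): for a jbyte at address 0x1003, dest_int is
// align_down(0x1003, sizeof(jint)) == 0x1000 and offset is 3, so
// cur_as_bytes[3] is the byte being exchanged. The other three bytes are
// written back with the values just observed in cur; if another thread has
// changed any of them in the meantime the cmpxchg fails and the loop retries,
// so they are never corrupted.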

#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE

inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
}

inline unsigned Atomic::cmpxchg(unsigned int exchange_value,
                                volatile unsigned int* dest, unsigned int compare_value,
                                cmpxchg_memory_order order) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::cmpxchg((jint)exchange_value, (volatile jint*)dest,
                                       (jint)compare_value, order);
}

inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
  // Most platforms do not support atomic add on a 2-byte value. However,
  // if the value occupies the most significant 16 bits of an aligned 32-bit
  // word, then we can do this with an atomic add of (add_value << 16)
  // to the 32-bit word.
  //
  // The least significant parts of this 32-bit word will never be affected, even
  // in case of overflow/underflow.
  //
  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
#endif
  return (jshort)(new_value >> 16); // preserves sign
}
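
// Illustration only (see macros.hpp for the real ATOMIC_SHORT_PAIR macro and
// its existing uses): a 2-byte field updated with Atomic::add is declared
// together with its non-atomic partner so that the atomic half ends up in the
// most significant 16 bits of an aligned 32-bit word, roughly:
//
//   ATOMIC_SHORT_PAIR(
//     volatile jshort _atomic_count,   // updated via Atomic::add/inc/dec
//     jshort          _plain_field     // shares the word; never updated atomically
//   );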

inline void Atomic::inc(volatile jshort* dest) {
  (void)add(1, dest);
}

inline void Atomic::dec(volatile jshort* dest) {
  (void)add(-1, dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/integerTypes.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/removePointer.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use a value that does not interfere with the C++11 memory orderings.
  // We need to be more conservative.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
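  // Note: Never<T>::value is always false but depends on T, so the
  // STATIC_ASSERTs in the primary specialized_* templates below only fire
  // when a platform fails to provide the required specialization, and only
  // at the point of instantiation.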
  template<typename T> class Never: public FalseType {};

  template <typename T>
  inline static void specialized_store(T store_value, volatile T* dest) {
    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
    (void)const_cast<T&>(*dest = store_value);
  }

  template <typename T>
  inline static T specialized_load(const volatile T* dest) {
    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
    return *dest;
  }

  template <typename T>
  inline static T specialized_add(T add_value, volatile T* dest) {
    STATIC_ASSERT(Never<T>::value);
    return add_value;
  }

  template <typename T>
  inline static void specialized_inc(volatile T* dest) {
    add(1, dest);
  }

  template <typename T>
  inline static void specialized_dec(volatile T* dest) {
    add(-1, dest);
  }

  template <typename T>
  inline static T specialized_xchg(T exchange_value, volatile T* dest) {
    STATIC_ASSERT(Never<T>::value);
    return exchange_value;
  }

  template <typename T>
  inline static T specialized_cmpxchg(T exchange_value, volatile T* dest, T compare_value, cmpxchg_memory_order order) {
    STATIC_ASSERT(Never<T>::value);
    return exchange_value;
  }

 public:
  // Atomic operations on 64-bit types are not available on all 32-bit
  // platforms. If atomic ops on 64-bit types are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.

  // Atomically store to a location
  // See comment above about using 64-bit atomics on 32-bit platforms
  template <typename T, typename U>
  inline static void store(T store_value, volatile U* dest);

  // The store_ptr() member functions are deprecated. Use store() instead.
  static void store_ptr(intptr_t store_value, volatile intptr_t* dest) {
    store(store_value, dest);
  }

  static void store_ptr(void* store_value, volatile void* dest) {
    store((intptr_t)store_value, (volatile intptr_t*)dest);
  }

  // Atomically load from a location
  // See comment above about using 64-bit atomics on 32-bit platforms
  template <typename T>
  inline static T load(volatile T* src);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  // add(I1 v, I* d)
  // add(I1 v, P* d)
  // where I, I1 are integral types, P is a pointer type.
  // Functional behavior is modelled on *dest += add_value.
  template <typename T, typename U>
  inline static U add(T add_value, volatile U* dst);

  template <typename T, typename U>
  inline static U* add(T add_value, U* volatile* dst);
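
  // Illustration only (names are hypothetical): the pointer overload behaves
  // like pointer arithmetic, so the addend is scaled by sizeof(U); with
  // HeapWord* volatile _top, Atomic::add(2, &_top) advances _top by two
  // HeapWords, not two bytes.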

  // The add_ptr() member functions are deprecated. Use add() instead.
  static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
    return add(add_value, dest);
  }

  static void* add_ptr(intptr_t add_value, volatile void* dest) {
    return (void*)add(add_value, (volatile intptr_t*)dest);
  }

  // Atomically increment location. inc*() provide:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // Functional behavior is modelled on *dest++
  template <typename T>
  inline static void inc(volatile T* dest);

  template <typename T>
  inline static void inc(T* volatile* dest);

  // The inc_ptr member functions are deprecated. Use inc() instead.
  static void inc_ptr(volatile intptr_t* dest) {
    inc(dest);
  }

  static void inc_ptr(volatile void* dest) {
    inc((volatile intptr_t*)dest);
  }

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // Functional behavior is modelled on *dest--
  template <typename T>
  inline static void dec(volatile T* dest);

  template <typename T>
  inline static void dec(T* volatile* dest);

  // The dec_ptr member functions are deprecated. Use dec() instead.
  static void dec_ptr(volatile intptr_t* dest) {
    dec(dest);
  }

  static void dec_ptr(volatile void* dest) {
    dec((volatile intptr_t*)dest);
  }

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  template <typename T, typename U>
  inline static U xchg(T exchange_value, volatile U* dest);

  // The xchg_ptr() member functions are deprecated. Use xchg() instead.
  static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
    return xchg(exchange_value, dest);
  }

  static void* xchg_ptr(void* exchange_value, volatile void* dest) {
    return (void*)xchg((intptr_t)exchange_value, (volatile intptr_t*)dest);
  }

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
  // See comment above about using 64-bit atomics on 32-bit platforms
  template <typename T, typename U, typename V>
  inline static U cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order = memory_order_conservative);

  // The cmpxchg_ptr member functions are deprecated. Use cmpxchg() instead.
  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest,
                                     intptr_t compare_value, cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value, dest, compare_value, order);
  }

  inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest,
                                  void* compare_value, cmpxchg_memory_order order = memory_order_conservative) {
    return (void*)cmpxchg((intptr_t)exchange_value, (volatile intptr_t*)dest, (intptr_t)compare_value, order);
  }
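
  // Migration sketch (illustration only, hypothetical field): code that
  // previously wrote
  //   Atomic::cmpxchg_ptr(new_top, &_top_offset, old_top)
  // for a volatile intptr_t _top_offset can call the template directly:
  //   Atomic::cmpxchg(new_top, &_top_offset, old_top)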
};

// internal implementation

template <typename T, typename U>
inline void Atomic::store(T store_value, volatile U* dest) {
  typedef typename IntegerTypes::Signed<U>::type Raw;
  U store_value_cast = store_value;
  specialized_store(IntegerTypes::cast_to_signed(store_value_cast), reinterpret_cast<volatile Raw*>(dest));
}

template <typename T>
inline T Atomic::load(volatile T* src) {
  typedef typename IntegerTypes::Signed<T>::type Raw;
  return IntegerTypes::cast<T>(specialized_load(reinterpret_cast<const volatile Raw*>(src)));
}

template <typename T, typename U>
inline U Atomic::add(T add_value, volatile U* dst) {
  STATIC_ASSERT(IsIntegral<T>::value);
  STATIC_ASSERT(IsIntegral<U>::value);
  typedef typename IntegerTypes::Signed<U>::type Raw;
  // Allow -Wconversion or the like to complain about unsafe conversions.
  U value = add_value;
  Raw raw_value = IntegerTypes::cast_to_signed(value);
  Raw result = specialized_add(raw_value, reinterpret_cast<volatile Raw*>(dst));
  return IntegerTypes::cast<U>(result);
}

template <typename T, typename U>
inline U* Atomic::add(T add_value, U* volatile* dst) {
  STATIC_ASSERT(IsIntegral<T>::value);
  typedef typename IntegerTypes::Signed<intptr_t>::type Raw;
  ptrdiff_t value = add_value;
  Raw raw_value = IntegerTypes::cast_to_signed(value * sizeof(U));
  Raw result = specialized_add(raw_value, reinterpret_cast<volatile Raw*>(dst));
  return IntegerTypes::cast<U*>(result);
}

template <typename T>
inline void Atomic::inc(volatile T* src) {
  STATIC_ASSERT(IsIntegral<T>::value);
  typedef typename IntegerTypes::Signed<T>::type Raw;
  specialized_inc(reinterpret_cast<volatile Raw*>(src));
}

template <typename T>
inline void Atomic::inc(T* volatile* src) {
  if (sizeof(T) != 1) {
    add(1, src);
  } else {
    typedef typename IntegerTypes::Signed<intptr_t>::type Raw;
    specialized_inc(reinterpret_cast<volatile Raw*>(src));
  }
}

template <typename T>
inline void Atomic::dec(volatile T* src) {
  STATIC_ASSERT(IsIntegral<T>::value);
  typedef typename IntegerTypes::Signed<T>::type Raw;
  specialized_dec(reinterpret_cast<volatile Raw*>(src));
}

template <typename T>
inline void Atomic::dec(T* volatile* src) {
  if (sizeof(T) != 1) {
    add(-1, src);
  } else {
    typedef typename IntegerTypes::Signed<intptr_t>::type Raw;
    specialized_dec(reinterpret_cast<volatile Raw*>(src));
  }
}

template <typename T, typename U>
inline U Atomic::xchg(T exchange_value, volatile U* dest) {
  typedef typename IntegerTypes::Signed<U>::type Raw;
  U exchange_value_cast = exchange_value;
  Raw result = specialized_xchg(IntegerTypes::cast_to_signed(exchange_value_cast),
                                reinterpret_cast<volatile Raw*>(dest));
  return IntegerTypes::cast<U>(result);
}

template <typename T, typename U, typename V>
inline U Atomic::cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order) {
  typedef typename IntegerTypes::Signed<U>::type Raw;
  U exchange_value_cast = exchange_value;
  U compare_value_cast = compare_value;
  Raw result = specialized_cmpxchg(IntegerTypes::cast_to_signed(exchange_value_cast),
                                   reinterpret_cast<volatile Raw*>(dest),
                                   IntegerTypes::cast_to_signed(compare_value_cast), order);
  return IntegerTypes::cast<U>(result);
}

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
/*
 * This is the default implementation of byte-sized cmpxchg. It emulates an 8-bit
 * cmpxchg in terms of a 32-bit cmpxchg. Platforms may override this by defining
 * their own inline definition as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE.
 * This will cause the platform-specific implementation to be used instead.
 */
template <>
inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest,
                                                  int8_t compare_value, cmpxchg_memory_order order) {
  volatile int32_t* dest_int =
      reinterpret_cast<volatile int32_t*>(align_down(dest, sizeof(int32_t)));
  size_t offset = pointer_delta(dest, dest_int, 1);
  int32_t cur = *dest_int;
  int8_t* cur_as_bytes = reinterpret_cast<int8_t*>(&cur);

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  cur_as_bytes[offset] = compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    int32_t new_value = cur;
    // ... except for the one byte we want to update
    reinterpret_cast<int8_t*>(&new_value)[offset] = exchange_value;

    int32_t res = cmpxchg(new_value, dest_int, cur, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte is still as cur we loop and try again
  } while (cur_as_bytes[offset] == compare_value);

  return cur_as_bytes[offset];
}

#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE

template <>
inline int16_t Atomic::specialized_add<int16_t>(int16_t add_value, volatile int16_t* dest) {
  // Most platforms do not support atomic add on a 2-byte value. However,
  // if the value occupies the most significant 16 bits of an aligned 32-bit
  // word, then we can do this with an atomic add of (add_value << 16)
  // to the 32-bit word.
  //
  // The least significant parts of this 32-bit word will never be affected, even
  // in case of overflow/underflow.
  //
  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest));
#endif
  return (int16_t)(new_value >> 16); // preserves sign
}

template <>
inline void Atomic::specialized_inc<int16_t>(volatile int16_t* dest) {
  (void)add(int16_t(1), dest);
}

template <>
inline void Atomic::specialized_dec<int16_t>(volatile int16_t* dest) {
  (void)add(int16_t(-1), dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP