100 return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
101 }
102
// Atomically exchanges an oop value at addr and returns the previous value.
// The new value is first converted with encode() to the decorator-dependent
// storage representation (AccessInternal::EncodedType — presumably compressed
// oops when the decorators say so; confirm against EncodedType), the raw
// atomic_xchg is performed on that encoded type, and the displaced encoded
// value is converted back with decode<T>() before being returned.
103 template <DecoratorSet decorators>
104 template <typename T>
105 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
106 typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
107 Encoded encoded_new = encode(new_value);
108 Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
109 return decode<T>(encoded_result);
110 }
111
// Field-addressed variant of oop_atomic_xchg: resolves the raw address of
// the field at (base + offset) via field_addr() and delegates to the
// pointer-based overload above. Returns the previous oop value.
112 template <DecoratorSet decorators>
113 template <typename T>
114 inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
115 return oop_atomic_xchg(new_value, field_addr(base, offset));
116 }
117
118 template <DecoratorSet decorators>
119 template <typename T>
120 inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw, arrayOop dst_
121 return arraycopy(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length);
122 }
123
// MO_SEQ_CST load: an acquire load, preceded by a full two-way fence on CPUs
// that are not multiple-copy atomic (per the
// support_IRIW_for_not_multiple_copy_atomic_cpu flag), which preserves
// independent-reads-of-independent-writes (IRIW) ordering on such platforms.
124 template <DecoratorSet decorators>
125 template <DecoratorSet ds, typename T>
126 inline typename EnableIf<
127 HasDecorator<ds, MO_SEQ_CST>::value, T>::type
128 RawAccessBarrier<decorators>::load_internal(void* addr) {
// The fence must precede the load; on multiple-copy-atomic CPUs the plain
// acquire load below is already sufficient for seq-cst loads.
129 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
130 OrderAccess::fence();
131 }
132 return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
133 }
134
135 template <DecoratorSet decorators>
136 template <DecoratorSet ds, typename T>
137 inline typename EnableIf<
138 HasDecorator<ds, MO_ACQUIRE>::value, T>::type
139 RawAccessBarrier<decorators>::load_internal(void* addr) {
140 return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
|
100 return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
101 }
102
103 template <DecoratorSet decorators>
104 template <typename T>
105 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
106 typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
107 Encoded encoded_new = encode(new_value);
108 Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
109 return decode<T>(encoded_result);
110 }
111
112 template <DecoratorSet decorators>
113 template <typename T>
114 inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
115 return oop_atomic_xchg(new_value, field_addr(base, offset));
116 }
117
118 template <DecoratorSet decorators>
119 template <typename T>
120 inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
121 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
122 size_t length) {
123 return arraycopy(src_obj, src_offset_in_bytes, src_raw,
124 dst_obj, dst_offset_in_bytes, dst_raw,
125 length);
126 }
127
// Sequentially consistent (MO_SEQ_CST) load. Implemented as an acquire load;
// on platforms that are not multiple-copy atomic a full fence is issued first
// (guarded by support_IRIW_for_not_multiple_copy_atomic_cpu) so that IRIW
// orderings remain sequentially consistent.
128 template <DecoratorSet decorators>
129 template <DecoratorSet ds, typename T>
130 inline typename EnableIf<
131 HasDecorator<ds, MO_SEQ_CST>::value, T>::type
132 RawAccessBarrier<decorators>::load_internal(void* addr) {
133 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
// Full fence before the load — required only on non-multiple-copy-atomic CPUs.
134 OrderAccess::fence();
135 }
136 return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
137 }
138
139 template <DecoratorSet decorators>
140 template <DecoratorSet ds, typename T>
141 inline typename EnableIf<
142 HasDecorator<ds, MO_ACQUIRE>::value, T>::type
143 RawAccessBarrier<decorators>::load_internal(void* addr) {
144 return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
|
229 AccessInternal::PossiblyLockedAccess<T>::value, T>::type
230 RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
231 if (!AccessInternal::wide_atomic_needs_locking()) {
232 return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
233 } else {
234 AccessInternal::AccessLocker access_lock;
235 volatile T* p = reinterpret_cast<volatile T*>(addr);
236 T old_val = RawAccess<>::load(p);
237 if (old_val == compare_value) {
238 RawAccess<>::store(p, new_value);
239 }
240 return old_val;
241 }
242 }
243
244 class RawAccessBarrierArrayCopy: public AllStatic {
245 public:
246 template <DecoratorSet decorators, typename T>
247 static inline typename EnableIf<
248 HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
249 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
250
251 src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
252 dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
253
254 // We do not check for ARRAYCOPY_ATOMIC for oops, because they are unconditionally always atomic.
255 if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
256 AccessInternal::arraycopy_arrayof_conjoint_oops(const_cast<T*>(src_raw), dst_raw, length);
257 } else {
258 typedef typename HeapOopType<decorators>::type OopType;
259 AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(const_cast<T*>(src_raw)),
260 reinterpret_cast<OopType*>(dst_raw), length);
261 }
262 }
263
264 template <DecoratorSet decorators, typename T>
265 static inline typename EnableIf<
266 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
267 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
268
269 src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
270 dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
271
272 if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
273 AccessInternal::arraycopy_arrayof_conjoint(const_cast<T*>(src_raw), dst_raw, length);
274 } else if (HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && sizeof(T) == HeapWordSize) {
275 // There is only a disjoint optimization for word granularity copying
276 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
277 AccessInternal::arraycopy_disjoint_words_atomic(const_cast<T*>(src_raw), dst_raw, length);
278 } else {
279 AccessInternal::arraycopy_disjoint_words(const_cast<T*>(src_raw), dst_raw, length);
280 }
281 } else {
282 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
283 AccessInternal::arraycopy_conjoint_atomic(const_cast<T*>(src_raw), dst_raw, length);
284 } else {
285 AccessInternal::arraycopy_conjoint(const_cast<T*>(src_raw), dst_raw, length);
286 }
287 }
288 }
289
290 template <DecoratorSet decorators>
291 static inline typename EnableIf<
292 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
293 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const void* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, void* ds
294
295 src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
296 dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
297
298 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
299 AccessInternal::arraycopy_conjoint_atomic(const_cast<void*>(src_raw), dst_raw, length);
300 } else {
301 AccessInternal::arraycopy_conjoint(const_cast<void*>(src_raw), dst_raw, length);
302 }
303 }
304 };
305
306 template <DecoratorSet decorators>
307 template <typename T>
308 inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw, arrayOop dst_obj,
309 RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, leng
310 return true;
311 }
312
313 template <DecoratorSet decorators>
314 inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
315 // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
316 // is modifying a reference field in the clonee, a non-oop-atomic copy might
317 // be suspended in the middle of copying the pointer and end up with parts
318 // of two different pointers in the field. Subsequent dereferences will crash.
319 // 4846409: an oop-copy of objects with long or double fields or arrays of same
320 // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
321 // of oops. We know objects are aligned on a minimum of an jlong boundary.
322 // The same is true of StubRoutines::object_copy and the various oop_copy
323 // variants, and of the code generated by the inline_native_clone intrinsic.
324
325 assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
326 AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
327 reinterpret_cast<jlong*>((oopDesc*)dst),
328 align_object_size(size) / HeapWordsPerLong);
|
233 AccessInternal::PossiblyLockedAccess<T>::value, T>::type
234 RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
235 if (!AccessInternal::wide_atomic_needs_locking()) {
236 return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
237 } else {
238 AccessInternal::AccessLocker access_lock;
239 volatile T* p = reinterpret_cast<volatile T*>(addr);
240 T old_val = RawAccess<>::load(p);
241 if (old_val == compare_value) {
242 RawAccess<>::store(p, new_value);
243 }
244 return old_val;
245 }
246 }
247
248 class RawAccessBarrierArrayCopy: public AllStatic {
249 public:
250 template <DecoratorSet decorators, typename T>
251 static inline typename EnableIf<
252 HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
253 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
254 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
255 size_t length) {
256
257 src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
258 dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
259
260 // We do not check for ARRAYCOPY_ATOMIC for oops, because they are unconditionally always atomic.
261 if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
262 AccessInternal::arraycopy_arrayof_conjoint_oops(src_raw, dst_raw, length);
263 } else {
264 typedef typename HeapOopType<decorators>::type OopType;
265 AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src_raw),
266 reinterpret_cast<OopType*>(dst_raw), length);
267 }
268 }
269
270 template <DecoratorSet decorators, typename T>
271 static inline typename EnableIf<
272 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
273 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
274
275 src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
276 dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
277
278 if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
279 AccessInternal::arraycopy_arrayof_conjoint(const_cast<T*>(src_raw), dst_raw, length);
280 } else if (HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && sizeof(T) == HeapWordSize) {
281 // There is only a disjoint optimization for word granularity copying
282 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
283 AccessInternal::arraycopy_disjoint_words_atomic(const_cast<T*>(src_raw), dst_raw, length);
284 } else {
285 AccessInternal::arraycopy_disjoint_words(const_cast<T*>(src_raw), dst_raw, length);
286 }
287 } else {
288 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
289 AccessInternal::arraycopy_conjoint_atomic(const_cast<T*>(src_raw), dst_raw, length);
290 } else {
291 AccessInternal::arraycopy_conjoint(const_cast<T*>(src_raw), dst_raw, length);
292 }
293 }
294 }
295
296 template <DecoratorSet decorators>
297 static inline typename EnableIf<
298 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
299 arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const void* src_raw,
300 arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst_raw,
301 size_t length) {
302
303 src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
304 dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
305
306 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
307 AccessInternal::arraycopy_conjoint_atomic(const_cast<void*>(src_raw), dst_raw, length);
308 } else {
309 AccessInternal::arraycopy_conjoint(const_cast<void*>(src_raw), dst_raw, length);
310 }
311 }
312 };
313
314 template <DecoratorSet decorators>
315 template <typename T>
316 inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
317 arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
318 size_t length) {
319 RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
320 dst_obj, dst_offset_in_bytes, dst_raw,
321 length);
322 return true;
323 }
324
325 template <DecoratorSet decorators>
326 inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
327 // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
328 // is modifying a reference field in the clonee, a non-oop-atomic copy might
329 // be suspended in the middle of copying the pointer and end up with parts
330 // of two different pointers in the field. Subsequent dereferences will crash.
331 // 4846409: an oop-copy of objects with long or double fields or arrays of same
332 // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
333 // of oops. We know objects are aligned on a minimum of an jlong boundary.
334 // The same is true of StubRoutines::object_copy and the various oop_copy
335 // variants, and of the code generated by the inline_native_clone intrinsic.
336
337 assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
338 AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
339 reinterpret_cast<jlong*>((oopDesc*)dst),
340 align_object_size(size) / HeapWordsPerLong);
|