101 }
102
103 template <DecoratorSet decorators>
104 template <typename T>
105 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
106 typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
107 Encoded encoded_new = encode(new_value);
108 Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
109 return decode<T>(encoded_result);
110 }
111
112 template <DecoratorSet decorators>
113 template <typename T>
114 inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
115 return oop_atomic_xchg(new_value, field_addr(base, offset));
116 }
117
118 template <DecoratorSet decorators>
119 template <typename T>
120 inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
121 return arraycopy(src, dst, length);
122 }
123
124 template <DecoratorSet decorators>
125 inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
126 bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
127 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
128 if (needs_oop_compress) {
129 return arraycopy(reinterpret_cast<narrowOop*>(src), reinterpret_cast<narrowOop*>(dst), length);
130 } else {
131 return arraycopy(reinterpret_cast<oop*>(src), reinterpret_cast<oop*>(dst), length);
132 }
133 }
134
135 template <DecoratorSet decorators>
136 template <DecoratorSet ds, typename T>
137 inline typename EnableIf<
138 HasDecorator<ds, MO_SEQ_CST>::value, T>::type
139 RawAccessBarrier<decorators>::load_internal(void* addr) {
140 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
141 OrderAccess::fence();
240 AccessInternal::PossiblyLockedAccess<T>::value, T>::type
241 RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
242 if (!AccessInternal::wide_atomic_needs_locking()) {
243 return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
244 } else {
245 AccessInternal::AccessLocker access_lock;
246 volatile T* p = reinterpret_cast<volatile T*>(addr);
247 T old_val = RawAccess<>::load(p);
248 if (old_val == compare_value) {
249 RawAccess<>::store(p, new_value);
250 }
251 return old_val;
252 }
253 }
254
255 class RawAccessBarrierArrayCopy: public AllStatic {
256 public:
257 template <DecoratorSet decorators, typename T>
258 static inline typename EnableIf<
259 HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
260 arraycopy(T* src, T* dst, size_t length) {
261 // We do not check for ARRAYCOPY_ATOMIC for oops, because they are unconditionally always atomic.
262 if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
263 AccessInternal::arraycopy_arrayof_conjoint_oops(src, dst, length);
264 } else {
265 typedef typename HeapOopType<decorators>::type OopType;
266 AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src),
267 reinterpret_cast<OopType*>(dst), length);
268 }
269 }
270
271 template <DecoratorSet decorators, typename T>
272 static inline typename EnableIf<
273 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
274 arraycopy(T* src, T* dst, size_t length) {
275 if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
276 AccessInternal::arraycopy_arrayof_conjoint(src, dst, length);
277 } else if (HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && sizeof(T) == HeapWordSize) {
278 // There is only a disjoint optimization for word granularity copying
279 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
280 AccessInternal::arraycopy_disjoint_words_atomic(src, dst, length);
281 } else {
282 AccessInternal::arraycopy_disjoint_words(src, dst, length);
283 }
284 } else {
285 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
286 AccessInternal::arraycopy_conjoint_atomic(src, dst, length);
287 } else {
288 AccessInternal::arraycopy_conjoint(src, dst, length);
289 }
290 }
291 }
292 };
293
// Generic arraycopy entry point for the raw barrier: dispatches on the
// decorators to the matching copy primitive. Raw copies cannot fail, so
// this always reports success.
template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::arraycopy(T* src, T* dst, size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src, dst, length);
  return true;
}
300
301 template <DecoratorSet decorators>
302 inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
303 // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
304 // is modifying a reference field in the clonee, a non-oop-atomic copy might
305 // be suspended in the middle of copying the pointer and end up with parts
306 // of two different pointers in the field. Subsequent dereferences will crash.
307 // 4846409: an oop-copy of objects with long or double fields or arrays of same
308 // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
309 // of oops. We know objects are aligned on a minimum of an jlong boundary.
310 // The same is true of StubRoutines::object_copy and the various oop_copy
311 // variants, and of the code generated by the inline_native_clone intrinsic.
312
313 assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
314 AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
315 reinterpret_cast<jlong*>((oopDesc*)dst),
316 align_object_size(size) / HeapWordsPerLong);
317 // Clear the header
|
101 }
102
103 template <DecoratorSet decorators>
104 template <typename T>
105 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
106 typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
107 Encoded encoded_new = encode(new_value);
108 Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
109 return decode<T>(encoded_result);
110 }
111
112 template <DecoratorSet decorators>
113 template <typename T>
114 inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
115 return oop_atomic_xchg(new_value, field_addr(base, offset));
116 }
117
118 template <DecoratorSet decorators>
119 template <typename T>
120 inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
121 return arraycopy(src_obj, dst_obj, src, dst, length);
122 }
123
124 template <DecoratorSet decorators>
125 inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
126 bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
127 HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
128 if (needs_oop_compress) {
129 return arraycopy(reinterpret_cast<narrowOop*>(src), reinterpret_cast<narrowOop*>(dst), length);
130 } else {
131 return arraycopy(reinterpret_cast<oop*>(src), reinterpret_cast<oop*>(dst), length);
132 }
133 }
134
135 template <DecoratorSet decorators>
136 template <DecoratorSet ds, typename T>
137 inline typename EnableIf<
138 HasDecorator<ds, MO_SEQ_CST>::value, T>::type
139 RawAccessBarrier<decorators>::load_internal(void* addr) {
140 if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
141 OrderAccess::fence();
240 AccessInternal::PossiblyLockedAccess<T>::value, T>::type
241 RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
242 if (!AccessInternal::wide_atomic_needs_locking()) {
243 return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
244 } else {
245 AccessInternal::AccessLocker access_lock;
246 volatile T* p = reinterpret_cast<volatile T*>(addr);
247 T old_val = RawAccess<>::load(p);
248 if (old_val == compare_value) {
249 RawAccess<>::store(p, new_value);
250 }
251 return old_val;
252 }
253 }
254
255 class RawAccessBarrierArrayCopy: public AllStatic {
256 public:
257 template <DecoratorSet decorators, typename T>
258 static inline typename EnableIf<
259 HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
260 arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
261 // We do not check for ARRAYCOPY_ATOMIC for oops, because they are unconditionally always atomic.
262 if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
263 AccessInternal::arraycopy_arrayof_conjoint_oops(src, dst, length);
264 } else {
265 typedef typename HeapOopType<decorators>::type OopType;
266 AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src),
267 reinterpret_cast<OopType*>(dst), length);
268 }
269 }
270
271 template <DecoratorSet decorators, typename T>
272 static inline typename EnableIf<
273 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
274 arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
275 if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
276 AccessInternal::arraycopy_arrayof_conjoint(src, dst, length);
277 } else if (HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && sizeof(T) == HeapWordSize) {
278 // There is only a disjoint optimization for word granularity copying
279 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
280 AccessInternal::arraycopy_disjoint_words_atomic(src, dst, length);
281 } else {
282 AccessInternal::arraycopy_disjoint_words(src, dst, length);
283 }
284 } else {
285 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
286 AccessInternal::arraycopy_conjoint_atomic(src, dst, length);
287 } else {
288 AccessInternal::arraycopy_conjoint(src, dst, length);
289 }
290 }
291 }
292
293 template <DecoratorSet decorators>
294 static inline typename EnableIf<
295 !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
296 arraycopy(arrayOop src_obj, arrayOop dst_obj, void* src, void* dst, size_t length) {
297 if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
298 AccessInternal::arraycopy_conjoint_atomic(src, dst, length);
299 } else {
300 AccessInternal::arraycopy_conjoint(src, dst, length);
301 }
302 }
303 };
304
// Generic arraycopy entry point for the raw barrier: dispatches on the
// decorators to the matching copy primitive, passing the array oops through.
// Raw copies cannot fail, so this always reports success.
template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
  return true;
}
311
312 template <DecoratorSet decorators>
313 inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
314 // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
315 // is modifying a reference field in the clonee, a non-oop-atomic copy might
316 // be suspended in the middle of copying the pointer and end up with parts
317 // of two different pointers in the field. Subsequent dereferences will crash.
318 // 4846409: an oop-copy of objects with long or double fields or arrays of same
319 // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
320 // of oops. We know objects are aligned on a minimum of an jlong boundary.
321 // The same is true of StubRoutines::object_copy and the various oop_copy
322 // variants, and of the code generated by the inline_native_clone intrinsic.
323
324 assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
325 AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
326 reinterpret_cast<jlong*>((oopDesc*)dst),
327 align_object_size(size) / HeapWordsPerLong);
328 // Clear the header
|