< prev index next >

src/hotspot/share/oops/accessBackend.inline.hpp

Print this page




 101   return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
 102 }
 103 
 104 template <DecoratorSet decorators>
 105 template <typename T>
 106 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
 107   typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
 108   Encoded encoded_new = encode(new_value);
 109   Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
 110   return decode<T>(encoded_result);
 111 }
 112 
template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
  // Field variant: resolve the address of the field at base + offset and
  // delegate to the address-based atomic oop exchange.
  return oop_atomic_xchg(new_value, field_addr(base, offset));
}
 118 
template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  // In the raw backend an oop arraycopy simply forwards to the generic
  // arraycopy; oop-specific treatment is selected through the decorators.
  return arraycopy(src_obj, src_offset_in_bytes, src_raw,
                   dst_obj, dst_offset_in_bytes, dst_raw,
                   length);
}
 128 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  // Sequentially consistent load. On CPUs that are not multiple-copy-atomic,
  // a full fence before the load is needed to preserve IRIW
  // (independent-reads-of-independent-writes) ordering; elsewhere the
  // acquiring load alone provides MO_SEQ_CST semantics for loads.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}
 139 
 140 template <DecoratorSet decorators>
 141 template <DecoratorSet ds, typename T>
 142 inline typename EnableIf<
 143   HasDecorator<ds, MO_ACQUIRE>::value, T>::type
 144 RawAccessBarrier<decorators>::load_internal(void* addr) {


  // Conjoint, element-wise atomic arraycopy for non-oop elements.
  // Selected when ARRAYCOPY_ATOMIC is set and neither the ARRAYCOPY_ARRAYOF
  // nor the HeapWord-sized ARRAYCOPY_DISJOINT specializations apply.
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    // Resolve (obj, byte offset) pairs into raw element pointers before the
    // copy; presumably obj_offset_to_raw passes a pre-resolved raw pointer
    // through unchanged — confirm against arrayOopDesc.
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
 331 };
 332 
// T = void carries no element-size information, so it never qualifies as
// HeapWord-sized; this keeps untyped copies out of the overloads gated on
// (ARRAYCOPY_DISJOINT && IsHeapWordSized<T>).
template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public IntegralConstant<bool, false> { };
 334 
template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  // Dispatch to the RawAccessBarrierArrayCopy overload selected by the
  // decorators; the raw backend has no failure path, so report success.
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
  return true;
}
 345 
template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // Copy the contents of src into dst with a jlong-atomic conjoint copy
  // (size is presumably in HeapWords — the copy length is
  // align_object_size(size) / HeapWordsPerLong jlongs), then reset dst's mark.
  //
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field.  Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
  // of oops.  We know objects are aligned on a minimum of an jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark_raw();


 101   return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
 102 }
 103 
 104 template <DecoratorSet decorators>
 105 template <typename T>
 106 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
 107   typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
 108   Encoded encoded_new = encode(new_value);
 109   Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
 110   return decode<T>(encoded_result);
 111 }
 112 
template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
  // Field variant: resolve the address of the field at base + offset and
  // delegate to the address-based atomic oop exchange.
  return oop_atomic_xchg(new_value, field_addr(base, offset));
}
 118 
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  // In the raw backend an oop arraycopy simply forwards to the generic
  // arraycopy; oop-specific treatment is selected through the decorators.
  arraycopy(src_obj, src_offset_in_bytes, src_raw,
            dst_obj, dst_offset_in_bytes, dst_raw,
            length);
}
 128 
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  // Sequentially consistent load. On CPUs that are not multiple-copy-atomic,
  // a full fence before the load is needed to preserve IRIW
  // (independent-reads-of-independent-writes) ordering; elsewhere the
  // acquiring load alone provides MO_SEQ_CST semantics for loads.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}
 139 
 140 template <DecoratorSet decorators>
 141 template <DecoratorSet ds, typename T>
 142 inline typename EnableIf<
 143   HasDecorator<ds, MO_ACQUIRE>::value, T>::type
 144 RawAccessBarrier<decorators>::load_internal(void* addr) {


  // Conjoint, element-wise atomic arraycopy for non-oop elements.
  // Selected when ARRAYCOPY_ATOMIC is set and neither the ARRAYCOPY_ARRAYOF
  // nor the HeapWord-sized ARRAYCOPY_DISJOINT specializations apply.
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    // Resolve (obj, byte offset) pairs into raw element pointers before the
    // copy; presumably obj_offset_to_raw passes a pre-resolved raw pointer
    // through unchanged — confirm against arrayOopDesc.
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
 331 };
 332 
// T = void carries no element-size information, so it never qualifies as
// HeapWord-sized; this keeps untyped copies out of the overloads gated on
// (ARRAYCOPY_DISJOINT && IsHeapWordSized<T>).
template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public IntegralConstant<bool, false> { };
 334 
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  // Dispatch to the RawAccessBarrierArrayCopy overload selected by the
  // decorators; the raw backend has no failure path to report.
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
}
 344 
template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // Copy the contents of src into dst with a jlong-atomic conjoint copy
  // (size is presumably in HeapWords — the copy length is
  // align_object_size(size) / HeapWordsPerLong jlongs), then reset dst's mark.
  //
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field.  Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
  // of oops.  We know objects are aligned on a minimum of an jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark_raw();
< prev index next >