
src/hotspot/share/oops/accessBackend.inline.hpp

rev 50331 : 8198285: More consistent Access API for arraycopy

old version (before the change):

 101   return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
 102 }
 103 
 104 template <DecoratorSet decorators>
 105 template <typename T>
 106 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
 107   typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
 108   Encoded encoded_new = encode(new_value);
 109   Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
 110   return decode<T>(encoded_result);
 111 }
 112 
 113 template <DecoratorSet decorators>
 114 template <typename T>
 115 inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
 116   return oop_atomic_xchg(new_value, field_addr(base, offset));
 117 }
 118 
 119 template <DecoratorSet decorators>
 120 template <typename T>
 121 inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
 122   return arraycopy(src_obj, dst_obj, src, dst, length);
 123 }
 124 
 125 template <DecoratorSet decorators>
 126 template <DecoratorSet ds, typename T>
 127 inline typename EnableIf<
 128   HasDecorator<ds, MO_SEQ_CST>::value, T>::type
 129 RawAccessBarrier<decorators>::load_internal(void* addr) {
 130   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 131     OrderAccess::fence();
 132   }
 133   return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
 134 }
 135 
 136 template <DecoratorSet decorators>
 137 template <DecoratorSet ds, typename T>
 138 inline typename EnableIf<
 139   HasDecorator<ds, MO_ACQUIRE>::value, T>::type
 140 RawAccessBarrier<decorators>::load_internal(void* addr) {
 141   return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
 142 }


 230   AccessInternal::PossiblyLockedAccess<T>::value, T>::type
 231 RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
 232   if (!AccessInternal::wide_atomic_needs_locking()) {
 233     return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
 234   } else {
 235     AccessInternal::AccessLocker access_lock;
 236     volatile T* p = reinterpret_cast<volatile T*>(addr);
 237     T old_val = RawAccess<>::load(p);
 238     if (old_val == compare_value) {
 239       RawAccess<>::store(p, new_value);
 240     }
 241     return old_val;
 242   }
 243 }
 244 
 245 class RawAccessBarrierArrayCopy: public AllStatic {
 246 public:
 247   template <DecoratorSet decorators, typename T>
 248   static inline typename EnableIf<
 249   HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
 250   arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
 251     // We do not check for ARRAYCOPY_ATOMIC for oops, because they are always atomic.
 252     if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
 253       AccessInternal::arraycopy_arrayof_conjoint_oops(src, dst, length);
 254     } else {
 255       typedef typename HeapOopType<decorators>::type OopType;
 256       AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src),
 257                                               reinterpret_cast<OopType*>(dst), length);
 258     }
 259   }
 260 
 261   template <DecoratorSet decorators, typename T>
 262   static inline typename EnableIf<
 263     !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
 264   arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
 265     if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
 266       AccessInternal::arraycopy_arrayof_conjoint(src, dst, length);
 267     } else if (HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && sizeof(T) == HeapWordSize) {
 268       // There is only a disjoint optimization for word granularity copying
 269       if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
 270         AccessInternal::arraycopy_disjoint_words_atomic(src, dst, length);
 271       } else {
 272         AccessInternal::arraycopy_disjoint_words(src, dst, length);
 273       }
 274     } else {
 275       if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
 276         AccessInternal::arraycopy_conjoint_atomic(src, dst, length);
 277       } else {
 278         AccessInternal::arraycopy_conjoint(src, dst, length);
 279       }
 280     }
 281   }
 282 
 283   template <DecoratorSet decorators>
 284   static inline typename EnableIf<
 285     !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
 286   arraycopy(arrayOop src_obj, arrayOop dst_obj, void* src, void* dst, size_t length) {
 287     if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
 288       AccessInternal::arraycopy_conjoint_atomic(src, dst, length);
 289     } else {
 290       AccessInternal::arraycopy_conjoint(src, dst, length);
 291     }
 292   }
 293 };
 294 
 295 template <DecoratorSet decorators>
 296 template <typename T>
 297 inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
 298   RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
 299   return true;
 300 }
 301 
 302 template <DecoratorSet decorators>
 303 inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
 304   // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
 305   // is modifying a reference field in the clonee, a non-oop-atomic copy might
 306   // be suspended in the middle of copying the pointer and end up with parts
 307   // of two different pointers in the field.  Subsequent dereferences will crash.
 308   // 4846409: an oop-copy of objects with long or double fields or arrays of same
 309   // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
 310   // of oops.  We know objects are aligned on a minimum of a jlong boundary.
 311   // The same is true of StubRoutines::object_copy and the various oop_copy
 312   // variants, and of the code generated by the inline_native_clone intrinsic.
 313 
 314   assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
 315   AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
 316                                             reinterpret_cast<jlong*>((oopDesc*)dst),
 317                                             align_object_size(size) / HeapWordsPerLong);
 318   // Clear the header

new version (rev 50331, with the change):

 101   return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
 102 }
 103 
 104 template <DecoratorSet decorators>
 105 template <typename T>
 106 inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
 107   typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
 108   Encoded encoded_new = encode(new_value);
 109   Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
 110   return decode<T>(encoded_result);
 111 }
 112 
 113 template <DecoratorSet decorators>
 114 template <typename T>
 115 inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
 116   return oop_atomic_xchg(new_value, field_addr(base, offset));
 117 }
 118 
 119 template <DecoratorSet decorators>
 120 template <typename T>
 121 inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, size_t length) {
 122   return arraycopy(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length);
 123 }
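
The substance of this webrev is visible in the signature above versus its counterpart in the old pane: each side of the copy is now described by an (object, byte offset, raw pointer) triple instead of a single typed pointer. A side-by-side reading, both signatures taken verbatim from the two panes:

// Old entry point (old pane above): typed raw pointers only.
//   bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj,
//                      T* src, T* dst, size_t length);
//
// New entry point (this rev): object + byte offset + raw pointer per side,
// resolved late via arrayOopDesc::obj_offset_to_raw (see below). The apparent
// intent is to let barrier implementations work from the object and offset
// rather than from a pre-computed address.
//   bool oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes,
//                      const T* src_raw,
//                      arrayOop dst_obj, size_t dst_offset_in_bytes,
//                      T* dst_raw, size_t length);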
 124 
 125 template <DecoratorSet decorators>
 126 template <DecoratorSet ds, typename T>
 127 inline typename EnableIf<
 128   HasDecorator<ds, MO_SEQ_CST>::value, T>::type
 129 RawAccessBarrier<decorators>::load_internal(void* addr) {
 130   if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
 131     OrderAccess::fence();
 132   }
 133   return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
 134 }
 135 
 136 template <DecoratorSet decorators>
 137 template <DecoratorSet ds, typename T>
 138 inline typename EnableIf<
 139   HasDecorator<ds, MO_ACQUIRE>::value, T>::type
 140 RawAccessBarrier<decorators>::load_internal(void* addr) {
 141   return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
 142 }
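
Both load_internal overloads above end in a load_acquire; the MO_SEQ_CST one additionally issues a full fence first on CPUs that are not multiple-copy atomic, where independent-reads-of-independent-writes (IRIW) ordering is not otherwise guaranteed. A standalone sketch of the comparable strength in portable C++, assuming std::atomic stands in for OrderAccess (not part of the webrev):

#include <atomic>

// Portable analogue of the MO_SEQ_CST load above: OrderAccess::fence()
// followed by load_acquire() on an IRIW-unfriendly CPU adds up to roughly
// what a seq_cst load provides in the C++ memory model.
template <typename T>
T load_seq_cst_sketch(const std::atomic<T>* addr) {
  return addr->load(std::memory_order_seq_cst);
}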


 230   AccessInternal::PossiblyLockedAccess<T>::value, T>::type
 231 RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
 232   if (!AccessInternal::wide_atomic_needs_locking()) {
 233     return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
 234   } else {
 235     AccessInternal::AccessLocker access_lock;
 236     volatile T* p = reinterpret_cast<volatile T*>(addr);
 237     T old_val = RawAccess<>::load(p);
 238     if (old_val == compare_value) {
 239       RawAccess<>::store(p, new_value);
 240     }
 241     return old_val;
 242   }
 243 }
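
atomic_cmpxchg_maybe_locked falls back to a lock when the platform has no native wide (e.g., 64-bit on 32-bit hardware) compare-and-exchange. A minimal standalone sketch of the same pattern, assuming a global std::mutex in place of AccessInternal::AccessLocker (hypothetical names, not part of the webrev):

#include <mutex>

// One global lock guarding every "wide" access, as AccessLocker does above.
// This is only correct if every racing access to such a location also takes
// the lock, which is why the loads/stores above go through RawAccess too.
static std::mutex g_access_lock;

// Locked compare-and-exchange: load, compare, conditionally store, all under
// the lock; the old value is returned whether or not the store happened,
// matching the contract of atomic_cmpxchg_maybe_locked.
template <typename T>
T cmpxchg_locked_sketch(T new_value, volatile T* addr, T compare_value) {
  std::lock_guard<std::mutex> guard(g_access_lock);
  T old_val = *addr;
  if (old_val == compare_value) {
    *addr = new_value;
  }
  return old_val;
}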
 244 
 245 class RawAccessBarrierArrayCopy: public AllStatic {
 246 public:
 247   template <DecoratorSet decorators, typename T>
 248   static inline typename EnableIf<
 249   HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
 250   arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, size_t length) {
 251 
 252     src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
 253     dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
 254 
 255     // We do not check for ARRAYCOPY_ATOMIC for oops, because they are always atomic.

 256     if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
 257       AccessInternal::arraycopy_arrayof_conjoint_oops(const_cast<T*>(src_raw), dst_raw, length);
 258     } else {
 259       typedef typename HeapOopType<decorators>::type OopType;
 260       AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(const_cast<T*>(src_raw)),
 261                                               reinterpret_cast<OopType*>(dst_raw), length);
 262     }
 263   }
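
Both arraycopy overloads begin by normalizing their inputs: arrayOopDesc::obj_offset_to_raw turns an (object, byte offset) pair into a raw address, or passes an already-raw pointer through untouched. Its definition lives in arrayOop.hpp, not in this file; a hypothetical standalone sketch of the assumed contract (names invented here):

#include <cstddef>

// Sketch of the assumed contract: exactly one addressing form is in use.
// If obj is non-null, the raw address is obj's base plus the byte offset;
// otherwise raw is already the resolved address and is returned as-is.
template <typename T>
T* obj_offset_to_raw_sketch(void* obj, size_t offset_in_bytes, T* raw) {
  if (obj != nullptr) {
    return reinterpret_cast<T*>(static_cast<char*>(obj) + offset_in_bytes);
  }
  return raw;
}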
 264 
 265   template <DecoratorSet decorators, typename T>
 266   static inline typename EnableIf<
 267     !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
 268   arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, size_t length) {
 269 
 270     src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
 271     dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
 272 
 273     if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
 274       AccessInternal::arraycopy_arrayof_conjoint(const_cast<T*>(src_raw), dst_raw, length);
 275     } else if (HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && sizeof(T) == HeapWordSize) {
 276       // There is only a disjoint optimization for word granularity copying
 277       if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
 278         AccessInternal::arraycopy_disjoint_words_atomic(const_cast<T*>(src_raw), dst_raw, length);
 279       } else {
 280         AccessInternal::arraycopy_disjoint_words(const_cast<T*>(src_raw), dst_raw, length);
 281       }
 282     } else {
 283       if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
 284         AccessInternal::arraycopy_conjoint_atomic(const_cast<T*>(src_raw), dst_raw, length);
 285       } else {
 286         AccessInternal::arraycopy_conjoint(const_cast<T*>(src_raw), dst_raw, length);
 287       }
 288     }
 289   }
 290 
 291   template <DecoratorSet decorators>
 292   static inline typename EnableIf<
 293     !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
 294   arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const void* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst_raw, size_t length) {
 295 
 296     src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
 297     dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);
 298 
 299     if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
 300       AccessInternal::arraycopy_conjoint_atomic(const_cast<void*>(src_raw), dst_raw, length);
 301     } else {
 302       AccessInternal::arraycopy_conjoint(const_cast<void*>(src_raw), dst_raw, length);
 303     }
 304   }
 305 };
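
RawAccessBarrierArrayCopy selects between its oop and non-oop overloads at compile time via the EnableIf/HasDecorator combination, then branches on further decorator bits inside the chosen overload; since the decorators are template parameters, those inner conditions are compile-time constants and dead branches fold away. A self-contained toy of the same SFINAE idiom (toy decorator values, not HotSpot's):

#include <cstdint>
#include <cstdio>

typedef uint64_t DecoratorSet;
const DecoratorSet ARRAYCOPY_ATOMIC  = 1;
const DecoratorSet ARRAYCOPY_ARRAYOF = 2;

// Minimal stand-ins for the metaprogramming used above.
template <bool condition, typename T = void> struct EnableIf {};
template <typename T> struct EnableIf<true, T> { typedef T type; };

template <DecoratorSet set, DecoratorSet decorator>
struct HasDecorator {
  static const bool value = (set & decorator) != 0;
};

// Two overloads selected at compile time by the decorator bits: when the
// condition is false, EnableIf<...>::type does not exist, substitution
// fails silently, and the other overload is chosen.
template <DecoratorSet decorators>
typename EnableIf<HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
copy_dispatch() { std::printf("element-atomic path\n"); }

template <DecoratorSet decorators>
typename EnableIf<!HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
copy_dispatch() { std::printf("plain path\n"); }

int main() {
  copy_dispatch<ARRAYCOPY_ATOMIC>();   // picks the atomic overload
  copy_dispatch<ARRAYCOPY_ARRAYOF>();  // picks the plain overload
  return 0;
}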
 306 
 307 template <DecoratorSet decorators>
 308 template <typename T>
 309 inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw, arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw, size_t length) {
 310   RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw, dst_obj, dst_offset_in_bytes, dst_raw, length);
 311   return true;
 312 }
 313 
 314 template <DecoratorSet decorators>
 315 inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
 316   // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
 317   // is modifying a reference field in the clonee, a non-oop-atomic copy might
 318   // be suspended in the middle of copying the pointer and end up with parts
 319   // of two different pointers in the field.  Subsequent dereferences will crash.
 320   // 4846409: an oop-copy of objects with long or double fields or arrays of same
 321   // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
 322   // of oops.  We know objects are aligned on a minimum of a jlong boundary.
 323   // The same is true of StubRoutines::object_copy and the various oop_copy
 324   // variants, and of the code generated by the inline_native_clone intrinsic.
 325 
 326   assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
 327   AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
 328                                             reinterpret_cast<jlong*>((oopDesc*)dst),
 329                                             align_object_size(size) / HeapWordsPerLong);
 330   // Clear the header
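
The comment block above is the heart of clone(): copying at jlong granularity keeps both oops and 64-bit primitives untearable on 32-bit VMs. A standalone sketch of that copying discipline in portable C++, with std::atomic standing in for HotSpot's aligned jlong loads/stores (clone's source and destination never overlap, so a forward loop suffices):

#include <atomic>
#include <cstddef>
#include <cstdint>

// Copy storage in 8-byte units so that pointer-sized fields are never torn;
// this assumes both buffers are 8-byte aligned, which HotSpot guarantees via
// the MinObjAlignmentInBytes >= BytesPerLong assert above.
void copy_jlongs_atomic_sketch(const std::atomic<int64_t>* src,
                               std::atomic<int64_t>* dst, size_t count) {
  for (size_t i = 0; i < count; i++) {
    // Each 8-byte unit is read and written whole: a racing reader of dst
    // sees the old or the new 64-bit value, never half of each.
    dst[i].store(src[i].load(std::memory_order_relaxed),
                 std::memory_order_relaxed);
  }
}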