
src/hotspot/share/oops/accessBackend.inline.hpp

rev 49182 : 8198445: Access API for primitive/native arraycopy
rev 49183 : [mq]: 8198445-2.patch


template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field.  Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
  // of oops.  We know objects are aligned on a minimum of a jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark();
}

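The block comment above carries the key constraint: the copy must never tear an oop (or a jlong/jdouble field), so the object body is moved in naturally aligned 64-bit units rather than bytes or oops. The divisor HeapWordsPerLong turns the size in heap words into a jlong count (one heap word per jlong on 64-bit VMs, two on 32-bit), and align_object_size rounds the size up to the object alignment, which the assert guarantees is at least jlong-sized. A minimal, self-contained sketch of the copying idea in plain C++, not HotSpot's Copy stubs (the name copy_jlong_units and the volatile-based approach are illustrative assumptions, not the real implementation):

#include <cstddef>
#include <cstdint>

// Illustration only: copy `words` naturally aligned 64-bit units. Each unit is
// read and written with a single aligned 64-bit access, so a pointer (or long/
// double) stored in the copied region is observed either entirely old or
// entirely new, never as halves of two different values. volatile discourages
// the compiler from splitting or merging these accesses; that is a common
// convention rather than a standard guarantee, and HotSpot's real copy stubs
// use platform-specific code for the same effect.
static void copy_jlong_units(const volatile int64_t* src,
                             volatile int64_t* dst,
                             size_t words) {
  for (size_t i = 0; i < words; i++) {
    dst[i] = src[i];
  }
}
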
// Conjoint (possibly overlapping) copy of length elements of T, delegating to
// the shared atomic memory copy with the size expressed in bytes.
template<typename T>
void AccessInternal::arraycopy_conjoint_atomic(T* src, T* dst, size_t length) {
  Copy::conjoint_memory_atomic(reinterpret_cast<void*>(src), reinterpret_cast<void*>(dst), length * sizeof(T));
}

#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP
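
The new AccessInternal::arraycopy_conjoint_atomic helper simply forwards to the shared Copy support with the element count converted to bytes. "Conjoint" means the source and destination ranges may overlap, so the copy direction must be chosen so that no source element is overwritten before it has been read. A rough sketch of that behaviour, assuming elements that can each be moved in a single access (the name conjoint_copy_sketch and the flat-address-space pointer comparison are illustrative assumptions, not HotSpot code):

#include <cstddef>

// Illustration only: element-wise copy between possibly overlapping regions.
// If dst lies inside the source range, a low-to-high copy would clobber source
// elements before they are read, so the copy runs high-to-low instead.
template <typename T>
static void conjoint_copy_sketch(const T* src, T* dst, size_t length) {
  if (dst <= src || dst >= src + length) {
    for (size_t i = 0; i < length; i++) {     // no harmful overlap: copy forward
      dst[i] = src[i];
    }
  } else {
    for (size_t i = length; i-- > 0; ) {      // dst overlaps the tail of src: copy backward
      dst[i] = src[i];
    }
  }
}

In the clone barrier above the destination is a freshly allocated object, so the regions never overlap and only the per-unit atomicity matters; the conjoint handling is what presumably lets the same helper back the overlapping primitive-array copies that the 8198445 change (Access API for primitive/native arraycopy) routes through this backend.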