
src/hotspot/share/oops/accessBackend.inline.hpp

rev 49289 : 8199735: Mark word updates need to use Access API


 298   return true;
 299 }
 300 
 301 template <DecoratorSet decorators>
 302 inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
 303   // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
 304   // is modifying a reference field in the clonee, a non-oop-atomic copy might
 305   // be suspended in the middle of copying the pointer and end up with parts
 306   // of two different pointers in the field.  Subsequent dereferences will crash.
 307   // 4846409: an oop-copy of objects with long or double fields or arrays of same
 308   // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
 309   // of oops.  We know objects are aligned on a minimum of a jlong boundary.
 310   // The same is true of StubRoutines::object_copy and the various oop_copy
 311   // variants, and of the code generated by the inline_native_clone intrinsic.
 312 
 313   assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
 314   AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
 315                                             reinterpret_cast<jlong*>((oopDesc*)dst),
 316                                             align_object_size(size) / HeapWordsPerLong);
 317   // Clear the header
-318   dst->init_mark();
+318   dst->init_mark_raw();
 319 }
 320 
 321 #endif // SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP
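For readers of this review, the comment block at lines 303-311 is the key constraint: the clone must copy the object in units that are at least pointer-sized so a concurrently updated reference field is never torn. Below is a minimal, self-contained toy sketch (not HotSpot code; the names clone_in_64bit_units and Obj are illustrative) of that "copy jlongs instead of oops" idea. It assumes, as the assert on MinObjAlignmentInBytes >= BytesPerLong does for real objects, that the data is 8-byte aligned and that aligned 64-bit loads and stores are single accesses on the host; HotSpot's arraycopy_conjoint_atomic provides that per-element atomicity per platform.

```cpp
#include <cstddef>
#include <cstdint>
#include <cstdio>

// Toy model of an oop-atomic clone: copy the object in aligned 64-bit units,
// so a pointer-sized field being updated by another thread is observed either
// wholly old or wholly new, never as halves of two different pointers.
static void clone_in_64bit_units(const void* src, void* dst, size_t size_in_bytes) {
  const uint64_t* s = static_cast<const uint64_t*>(src);
  uint64_t*       d = static_cast<uint64_t*>(dst);
  for (size_t i = 0; i < size_in_bytes / sizeof(uint64_t); i++) {
    d[i] = s[i];  // each 8-byte chunk is read and written as one unit
  }
}

int main() {
  struct alignas(8) Obj { uint64_t header; const char* ref; double d; };
  Obj src{0x1234, "payload", 3.14};
  Obj dst;
  clone_in_64bit_units(&src, &dst, sizeof(Obj));
  dst.header = 0;  // analogous to re-initializing the destination header after the copy
  std::printf("%s %f\n", dst.ref, dst.d);
  return 0;
}
```

The one-line change in this file (init_mark() to init_mark_raw()) follows the theme of the changeset title: once mark word updates go through the Access API, the raw access backend presumably needs the _raw variant so that re-initializing the destination's header does not route back into the access machinery it implements.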