/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_OOPS_ACCESSBACKEND_INLINE_HPP
#define SHARE_OOPS_ACCESSBACKEND_INLINE_HPP

#include "oops/access.hpp"
#include "oops/accessBackend.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"

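// When compressed oops are in use, accesses of oop fields must convert
// between the in-heap (narrowOop) representation and regular oops. The
// decode/encode helpers below perform that conversion; the IS_NOT_NULL
// decorator selects the cheaper not-null variants.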
template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::decode_not_null(value);
  } else {
    return CompressedOops::decode(value);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
  typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
  if (HasDecorator<decorators, IS_NOT_NULL>::value) {
    return CompressedOops::encode_not_null(value);
  } else {
    return CompressedOops::encode(value);
  }
}

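// The oop_* accessors wrap the underlying raw accesses with the
// encode/decode conversions above, so that callers deal only in oop values.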
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store(void* addr, T value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = encode(value);
  store(reinterpret_cast<Encoded*>(addr), encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store_at(oop base, ptrdiff_t offset, T value) {
  oop_store(field_addr(base, offset), value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load(void* addr) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = load<Encoded>(reinterpret_cast<Encoded*>(addr));
  return decode<T>(encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load_at(oop base, ptrdiff_t offset) {
  return oop_load<T>(field_addr(base, offset));
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(T new_value, void* addr, T compare_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_compare = encode(compare_value);
  Encoded encoded_result = atomic_cmpxchg(encoded_new,
                                          reinterpret_cast<Encoded*>(addr),
                                          encoded_compare);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
  return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
  return oop_atomic_xchg(new_value, field_addr(base, offset));
}

template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  return arraycopy(src_obj, src_offset_in_bytes, src_raw,
                   dst_obj, dst_offset_in_bytes, dst_raw,
                   length);
}

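// The *_internal functions below implement the raw accesses with the memory
// ordering selected by the MO_* decorator.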
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  // CPUs that are not multiple copy atomic need a full fence before a
  // seq_cst load to preserve IRIW (independent reads of independent
  // writes) ordering.
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return Atomic::load(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  OrderAccess::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELEASE>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  OrderAccess::release_store(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  Atomic::store(value, reinterpret_cast<volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
  return Atomic::cmpxchg(new_value,
                         reinterpret_cast<volatile T*>(addr),
                         compare_value,
                         memory_order_relaxed);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
  return Atomic::cmpxchg(new_value,
                         reinterpret_cast<volatile T*>(addr),
                         compare_value,
                         memory_order_conservative);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_xchg_internal(T new_value, void* addr) {
  return Atomic::xchg(new_value,
                      reinterpret_cast<volatile T*>(addr));
}
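
// Note: only an MO_SEQ_CST variant of atomic_xchg_internal is provided in
// this file; Atomic::xchg has conservatively ordered (full-fence) semantics.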

// For platforms that do not have native support for wide atomics,
// we can emulate the atomicity using a lock. The functions below
// check whether that is necessary and fall back to the lock when it is.

template <DecoratorSet ds>
template <DecoratorSet decorators, typename T>
inline typename EnableIf<
  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
RawAccessBarrier<ds>::atomic_xchg_maybe_locked(T new_value, void* addr) {
  if (!AccessInternal::wide_atomic_needs_locking()) {
    return atomic_xchg_internal<ds>(new_value, addr);
  } else {
    AccessInternal::AccessLocker access_lock;
    volatile T* p = reinterpret_cast<volatile T*>(addr);
    T old_val = RawAccess<>::load(p);
    RawAccess<>::store(p, new_value);
    return old_val;
  }
}

template <DecoratorSet ds>
template <DecoratorSet decorators, typename T>
inline typename EnableIf<
  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
  if (!AccessInternal::wide_atomic_needs_locking()) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  } else {
    AccessInternal::AccessLocker access_lock;
    volatile T* p = reinterpret_cast<volatile T*>(addr);
    T old_val = RawAccess<>::load(p);
    if (old_val == compare_value) {
      RawAccess<>::store(p, new_value);
    }
    return old_val;
  }
}
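
// Illustrative sketch (not part of the implementation): on a 32-bit
// platform where 64-bit atomics require locking, a raw seq_cst cmpxchg
// of a jlong field, e.g.
//
//   jlong prev = RawAccess<MO_SEQ_CST>::atomic_cmpxchg(one, addr, zero);
//
// dispatches to atomic_cmpxchg_maybe_locked above and takes the
// AccessLocker path.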

class RawAccessBarrierArrayCopy: public AllStatic {
  template<typename T> struct IsHeapWordSized: public IntegralConstant<bool, sizeof(T) == HeapWordSize> { };
public:
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // We do not check for ARRAYCOPY_ATOMIC for oops, because oop copies are always atomic.
    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
      AccessInternal::arraycopy_arrayof_conjoint_oops(src_raw, dst_raw, length);
    } else {
      typedef typename HeapOopType<decorators>::type OopType;
      AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src_raw),
                                              reinterpret_cast<OopType*>(dst_raw), length);
    }
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_arrayof_conjoint(src_raw, dst_raw, length);
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // There is only a disjoint optimization for word granularity copying
    if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
      AccessInternal::arraycopy_disjoint_words_atomic(src_raw, dst_raw, length);
    } else {
      AccessInternal::arraycopy_disjoint_words(src_raw, dst_raw, length);
    }
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    !HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint(src_raw, dst_raw, length);
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    !(HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && IsHeapWordSized<T>::value) &&
    !HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value &&
    HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {
    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    AccessInternal::arraycopy_conjoint_atomic(src_raw, dst_raw, length);
  }
};

// Copies with a type-erased (void) element type have unknown element size,
// so they never qualify as HeapWord-sized.
template<> struct RawAccessBarrierArrayCopy::IsHeapWordSized<void>: public IntegralConstant<bool, false> { };

template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
  return true;
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field.  Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit VMs, so we copy jlongs instead
  // of oops.  We know objects are aligned on a minimum of a jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark_raw();
}

#endif // SHARE_OOPS_ACCESSBACKEND_INLINE_HPP