/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP
#define SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP

#include "oops/access.hpp"
#include "oops/accessBackend.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
  if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
    return CompressedOops::decode_not_null(value);
  } else {
    return CompressedOops::decode(value);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
  typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
  if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
    return CompressedOops::encode_not_null(value);
  } else {
    return CompressedOops::encode(value);
  }
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store(void* addr, T value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = encode(value);
  store(reinterpret_cast<Encoded*>(addr), encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store_at(oop base, ptrdiff_t offset, T value) {
  oop_store(field_addr(base, offset), value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load(void* addr) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = load<Encoded>(reinterpret_cast<Encoded*>(addr));
  return decode<T>(encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load_at(oop base, ptrdiff_t offset) {
  return oop_load<T>(field_addr(base, offset));
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(T new_value, void* addr, T compare_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_compare = encode(compare_value);
  Encoded encoded_result = atomic_cmpxchg(encoded_new,
                                          reinterpret_cast<Encoded*>(addr),
                                          encoded_compare);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
  return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
  return oop_atomic_xchg(new_value, field_addr(base, offset));
}

template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
  return arraycopy(src_obj, dst_obj, src, dst, length);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return Atomic::load(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  OrderAccess::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELEASE>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  OrderAccess::release_store(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  Atomic::store(value, reinterpret_cast<volatile T*>(addr));
}
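
// A minimal usage sketch (assumed caller code, not part of this file): the MO_*
// memory-ordering decorators select which load_internal/store_internal overload
// above is instantiated for a raw access. Assuming a plain jint field at
// address p, a caller might write:
//
//   jint v = RawAccess<MO_ACQUIRE>::load(p);   // acquire load via load_internal
//   RawAccess<MO_RELEASE>::store(p, v + 1);    // release store via store_internal
//
// The remaining MO_* decorators map to the other overloads analogously.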

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
  return Atomic::cmpxchg(new_value,
                         reinterpret_cast<volatile T*>(addr),
                         compare_value,
                         memory_order_relaxed);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
  return Atomic::cmpxchg(new_value,
                         reinterpret_cast<volatile T*>(addr),
                         compare_value,
                         memory_order_conservative);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_xchg_internal(T new_value, void* addr) {
  return Atomic::xchg(new_value,
                      reinterpret_cast<volatile T*>(addr));
}

// For platforms that do not have native support for wide atomics,
// we can emulate the atomicity using a lock. So here we check
// whether that is necessary or not.

template <DecoratorSet ds>
template <DecoratorSet decorators, typename T>
inline typename EnableIf<
  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
RawAccessBarrier<ds>::atomic_xchg_maybe_locked(T new_value, void* addr) {
  if (!AccessInternal::wide_atomic_needs_locking()) {
    return atomic_xchg_internal<ds>(new_value, addr);
  } else {
    AccessInternal::AccessLocker access_lock;
    volatile T* p = reinterpret_cast<volatile T*>(addr);
    T old_val = RawAccess<>::load(p);
    RawAccess<>::store(p, new_value);
    return old_val;
  }
}

template <DecoratorSet ds>
template <DecoratorSet decorators, typename T>
inline typename EnableIf<
  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
  if (!AccessInternal::wide_atomic_needs_locking()) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  } else {
    AccessInternal::AccessLocker access_lock;
    volatile T* p = reinterpret_cast<volatile T*>(addr);
    T old_val = RawAccess<>::load(p);
    if (old_val == compare_value) {
      RawAccess<>::store(p, new_value);
    }
    return old_val;
  }
}
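
// A minimal sketch of when the locked fallback above is taken (assumed caller
// code, not part of this file): on a platform whose Atomic layer cannot
// cmpxchg a 64-bit value natively, so wide_atomic_needs_locking() is true,
// a call such as
//
//   jlong witnessed = RawAccess<MO_SEQ_CST>::atomic_cmpxchg(new_val, p, expected);
//
// is expected to reach atomic_cmpxchg_maybe_locked for the jlong type, which
// takes the AccessInternal::AccessLocker and performs the compare and
// conditional store under that lock. With native wide atomics the same call
// resolves to atomic_cmpxchg_internal and Atomic::cmpxchg directly.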

class RawAccessBarrierArrayCopy: public AllStatic {
public:
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
    // We do not check for ARRAYCOPY_ATOMIC for oops, because they are unconditionally always atomic.
    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
      AccessInternal::arraycopy_arrayof_conjoint_oops(src, dst, length);
    } else {
      typedef typename HeapOopType<decorators>::type OopType;
      AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src),
                                              reinterpret_cast<OopType*>(dst), length);
    }
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
      AccessInternal::arraycopy_arrayof_conjoint(src, dst, length);
    } else if (HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && sizeof(T) == HeapWordSize) {
      // There is only a disjoint optimization for word granularity copying
      if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
        AccessInternal::arraycopy_disjoint_words_atomic(src, dst, length);
      } else {
        AccessInternal::arraycopy_disjoint_words(src, dst, length);
      }
    } else {
      if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
        AccessInternal::arraycopy_conjoint_atomic(src, dst, length);
      } else {
        AccessInternal::arraycopy_conjoint(src, dst, length);
      }
    }
  }

  template <DecoratorSet decorators>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, arrayOop dst_obj, void* src, void* dst, size_t length) {
    if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
      AccessInternal::arraycopy_conjoint_atomic(src, dst, length);
    } else {
      AccessInternal::arraycopy_conjoint(src, dst, length);
    }
  }
};

template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
  return true;
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field. Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of same
  // won't copy the longs/doubles atomically in 32-bit vm's, so we copy jlongs instead
  // of oops. We know objects are aligned on a minimum of an jlong boundary.
  // The same is true of StubRoutines::object_copy and the various oop_copy
  // variants, and of the code generated by the inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header
  dst->init_mark_raw();
}

#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP