/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP
#define SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP

#include "oops/access.hpp"
#include "oops/accessBackend.hpp"
#include "oops/compressedOops.inline.hpp"
#include "oops/oopsHierarchy.hpp"

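// decode_internal/encode_internal convert between oop values and their
// on-heap representation. These overloads are enabled (via SFINAE on
// AccessInternal::MustConvertCompressedOop) only when the stored type is
// a narrowOop that actually needs conversion; the OOP_NOT_NULL decorator
// selects the cheaper not-null encode/decode variants.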
template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
RawAccessBarrier<decorators>::decode_internal(typename HeapOopType<idecorators>::type value) {
  if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
    return CompressedOops::decode_not_null(value);
  } else {
    return CompressedOops::decode(value);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet idecorators, typename T>
inline typename EnableIf<
  AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
  typename HeapOopType<idecorators>::type>::type
RawAccessBarrier<decorators>::encode_internal(T value) {
  if (HasDecorator<decorators, OOP_NOT_NULL>::value) {
    return CompressedOops::encode_not_null(value);
  } else {
    return CompressedOops::encode(value);
  }
}

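// The oop_* accessors below encode oop values into their storage
// representation (possibly a narrowOop) before performing the raw memory
// access, and decode loaded values back into oops.
//
// A minimal usage sketch, mirroring how this file itself uses the raw API
// in the locked fallbacks further down (decorator and type resolution
// normally happens in access.hpp before reaching this barrier):
//   volatile jint* p = ...;          // some primitive field address
//   jint v = RawAccess<>::load(p);   // raw load, no GC barriers
//   RawAccess<>::store(p, v + 1);    // raw store, no GC barriers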
template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store(void* addr, T value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = encode(value);
  store(reinterpret_cast<Encoded*>(addr), encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline void RawAccessBarrier<decorators>::oop_store_at(oop base, ptrdiff_t offset, T value) {
  oop_store(field_addr(base, offset), value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load(void* addr) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded = load<Encoded>(reinterpret_cast<Encoded*>(addr));
  return decode<T>(encoded);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_load_at(oop base, ptrdiff_t offset) {
  return oop_load<T>(field_addr(base, offset));
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg(T new_value, void* addr, T compare_value) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_compare = encode(compare_value);
  Encoded encoded_result = atomic_cmpxchg(encoded_new,
                                          reinterpret_cast<Encoded*>(addr),
                                          encoded_compare);
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
  return oop_atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg(T new_value, void* addr) {
  typedef typename AccessInternal::EncodedType<decorators, T>::type Encoded;
  Encoded encoded_new = encode(new_value);
  Encoded encoded_result = atomic_xchg(encoded_new, reinterpret_cast<Encoded*>(addr));
  return decode<T>(encoded_result);
}

template <DecoratorSet decorators>
template <typename T>
inline T RawAccessBarrier<decorators>::oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
  return oop_atomic_xchg(new_value, field_addr(base, offset));
}

template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::oop_arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                        arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                        size_t length) {
  return arraycopy(src_obj, src_offset_in_bytes, src_raw,
                   dst_obj, dst_offset_in_bytes, dst_raw,
                   length);
}

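// The load_internal/store_internal overloads below are selected by SFINAE
// on the memory ordering decorator: MO_SEQ_CST maps loads to (fence +)
// load_acquire and stores to release_store_fence, MO_ACQUIRE/MO_RELEASE
// map to load_acquire/release_store, and MO_RELAXED maps to plain atomic
// loads and stores.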
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  if (support_IRIW_for_not_multiple_copy_atomic_cpu) {
    OrderAccess::fence();
  }
  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_ACQUIRE>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return OrderAccess::load_acquire(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::load_internal(void* addr) {
  return Atomic::load(reinterpret_cast<const volatile T*>(addr));
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  OrderAccess::release_store_fence(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELEASE>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  OrderAccess::release_store(reinterpret_cast<volatile T*>(addr), value);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value>::type
RawAccessBarrier<decorators>::store_internal(void* addr, T value) {
  Atomic::store(value, reinterpret_cast<volatile T*>(addr));
}

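// The compare-and-swap and swap primitives below map the access decorators
// onto Atomic's memory ordering arguments: MO_RELAXED uses
// memory_order_relaxed, while MO_SEQ_CST uses the conservative
// (fully fenced) memory_order_conservative ordering.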
template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_RELAXED>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
  return Atomic::cmpxchg(new_value,
                         reinterpret_cast<volatile T*>(addr),
                         compare_value,
                         memory_order_relaxed);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_cmpxchg_internal(T new_value, void* addr, T compare_value) {
  return Atomic::cmpxchg(new_value,
                         reinterpret_cast<volatile T*>(addr),
                         compare_value,
                         memory_order_conservative);
}

template <DecoratorSet decorators>
template <DecoratorSet ds, typename T>
inline typename EnableIf<
  HasDecorator<ds, MO_SEQ_CST>::value, T>::type
RawAccessBarrier<decorators>::atomic_xchg_internal(T new_value, void* addr) {
  return Atomic::xchg(new_value,
                      reinterpret_cast<volatile T*>(addr));
}

// For platforms that do not have native support for wide atomics, the
// atomicity can be emulated with a lock. The helpers below check whether
// that is necessary and fall back to lock-based emulation when it is.

template <DecoratorSet ds>
template <DecoratorSet decorators, typename T>
inline typename EnableIf<
  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
RawAccessBarrier<ds>::atomic_xchg_maybe_locked(T new_value, void* addr) {
  if (!AccessInternal::wide_atomic_needs_locking()) {
    return atomic_xchg_internal<ds>(new_value, addr);
  } else {
    AccessInternal::AccessLocker access_lock;
    volatile T* p = reinterpret_cast<volatile T*>(addr);
    T old_val = RawAccess<>::load(p);
    RawAccess<>::store(p, new_value);
    return old_val;
  }
}

template <DecoratorSet ds>
template <DecoratorSet decorators, typename T>
inline typename EnableIf<
  AccessInternal::PossiblyLockedAccess<T>::value, T>::type
RawAccessBarrier<ds>::atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
  if (!AccessInternal::wide_atomic_needs_locking()) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  } else {
    AccessInternal::AccessLocker access_lock;
    volatile T* p = reinterpret_cast<volatile T*>(addr);
    T old_val = RawAccess<>::load(p);
    if (old_val == compare_value) {
      RawAccess<>::store(p, new_value);
    }
    return old_val;
  }
}

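// RawAccessBarrierArrayCopy dispatches an array copy on the element kind
// encoded in the decorators: oop elements (INTERNAL_VALUE_IS_OOP) always
// use an oop-atomic copy routine, while primitive elements select a copy
// routine based on the ARRAYCOPY_ARRAYOF, ARRAYCOPY_DISJOINT and
// ARRAYCOPY_ATOMIC decorators. Each operand may be identified either by a
// (base object, offset) pair or by a raw pointer; obj_offset_to_raw
// resolves the effective raw address.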
class RawAccessBarrierArrayCopy: public AllStatic {
public:
  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {

    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    // We do not check for ARRAYCOPY_ATOMIC for oops, because oop copies are always atomic.
    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
      AccessInternal::arraycopy_arrayof_conjoint_oops(src_raw, dst_raw, length);
    } else {
      typedef typename HeapOopType<decorators>::type OopType;
      AccessInternal::arraycopy_conjoint_oops(reinterpret_cast<OopType*>(src_raw),
                                              reinterpret_cast<OopType*>(dst_raw), length);
    }
  }

  template <DecoratorSet decorators, typename T>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const T* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
            size_t length) {

    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    if (HasDecorator<decorators, ARRAYCOPY_ARRAYOF>::value) {
      AccessInternal::arraycopy_arrayof_conjoint(const_cast<T*>(src_raw), dst_raw, length);
    } else if (HasDecorator<decorators, ARRAYCOPY_DISJOINT>::value && sizeof(T) == HeapWordSize) {
      // There is only a disjoint optimization for word granularity copying
      if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
        AccessInternal::arraycopy_disjoint_words_atomic(const_cast<T*>(src_raw), dst_raw, length);
      } else {
        AccessInternal::arraycopy_disjoint_words(const_cast<T*>(src_raw), dst_raw, length);
      }
    } else {
      if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
        AccessInternal::arraycopy_conjoint_atomic(const_cast<T*>(src_raw), dst_raw, length);
      } else {
        AccessInternal::arraycopy_conjoint(const_cast<T*>(src_raw), dst_raw, length);
      }
    }
  }

  template <DecoratorSet decorators>
  static inline typename EnableIf<
    !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value>::type
  arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, const void* src_raw,
            arrayOop dst_obj, size_t dst_offset_in_bytes, void* dst_raw,
            size_t length) {

    src_raw = arrayOopDesc::obj_offset_to_raw(src_obj, src_offset_in_bytes, src_raw);
    dst_raw = arrayOopDesc::obj_offset_to_raw(dst_obj, dst_offset_in_bytes, dst_raw);

    if (HasDecorator<decorators, ARRAYCOPY_ATOMIC>::value) {
      AccessInternal::arraycopy_conjoint_atomic(const_cast<void*>(src_raw), dst_raw, length);
    } else {
      AccessInternal::arraycopy_conjoint(const_cast<void*>(src_raw), dst_raw, length);
    }
  }
};

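// The raw arraycopy always succeeds: there are no GC barriers at this
// layer that could veto the copy, so after delegating to the dispatcher
// above, the barrier unconditionally reports success.
//
// Illustrative call shape (a sketch only; real callers reach this through
// the Access API in access.hpp, e.g. ArrayAccess<>, with decorators and
// types already resolved), passing NULL raw pointers so the addresses are
// resolved from the (base, offset) pairs:
//   RawAccessBarrier<decorators>::arraycopy(src_obj, src_off, (T*)NULL,
//                                           dst_obj, dst_off, (T*)NULL,
//                                           length);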
template <DecoratorSet decorators>
template <typename T>
inline bool RawAccessBarrier<decorators>::arraycopy(arrayOop src_obj, size_t src_offset_in_bytes, T* src_raw,
                                                    arrayOop dst_obj, size_t dst_offset_in_bytes, T* dst_raw,
                                                    size_t length) {
  RawAccessBarrierArrayCopy::arraycopy<decorators>(src_obj, src_offset_in_bytes, src_raw,
                                                   dst_obj, dst_offset_in_bytes, dst_raw,
                                                   length);
  return true;
}

template <DecoratorSet decorators>
inline void RawAccessBarrier<decorators>::clone(oop src, oop dst, size_t size) {
  // 4839641 (4840070): We must do an oop-atomic copy, because if another thread
  // is modifying a reference field in the clonee, a non-oop-atomic copy might
  // be suspended in the middle of copying the pointer and end up with parts
  // of two different pointers in the field.  Subsequent dereferences will crash.
  // 4846409: an oop-copy of objects with long or double fields or arrays of the
  // same won't copy the longs/doubles atomically in 32-bit VMs, so we copy
  // jlongs instead of oops.  We know objects are aligned on a minimum of a
  // jlong boundary.  The same is true of StubRoutines::object_copy and the
  // various oop_copy variants, and of the code generated by the
  // inline_native_clone intrinsic.

  assert(MinObjAlignmentInBytes >= BytesPerLong, "objects misaligned");
  AccessInternal::arraycopy_conjoint_atomic(reinterpret_cast<jlong*>((oopDesc*)src),
                                            reinterpret_cast<jlong*>((oopDesc*)dst),
                                            align_object_size(size) / HeapWordsPerLong);
  // Clear the header (reset the destination's mark word)
  dst->init_mark_raw();
}

#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_INLINE_HPP