/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
#define SHARE_VM_RUNTIME_ACCESSBACKEND_HPP

#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/integralConstant.hpp"
#include "metaprogramming/isSame.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// This metafunction returns either oop or narrowOop depending on whether
// an access needs to use compressed oops or not.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  typedef typename Conditional<needs_oop_compress, narrowOop, oop>::type type;
};
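
// Illustrative sketch (not compiled): when both internal compressed-oop
// decorators are present, HeapOopType selects narrowOop; otherwise it
// selects oop. The decorator combinations below are hypothetical examples.
//
//   typedef HeapOopType<INTERNAL_CONVERT_COMPRESSED_OOP |
//                       INTERNAL_RT_USE_COMPRESSED_OOPS>::type compressed_type;
//   // compressed_type is narrowOop
//   typedef HeapOopType<INTERNAL_EMPTY>::type uncompressed_type;
//   // uncompressed_type is oop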

namespace AccessInternal {
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_RESOLVE
  };

  // This metafunction returns true iff an accessed value must be converted
  // between oop and narrowOop: the value is an oop, it is accessed as oop,
  // and the runtime stores the field in compressed (narrow) form.
  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public IntegralConstant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    IsSame<typename HeapOopType<decorators>::type, narrowOop>::value &&
    IsSame<T, oop>::value> {};
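
  // Illustrative sketch (not compiled; the decorator sets are hypothetical):
  //
  //   // An oop value stored compressed, accessed as oop: conversion needed.
  //   MustConvertCompressedOop<INTERNAL_VALUE_IS_OOP |
  //                            INTERNAL_CONVERT_COMPRESSED_OOP |
  //                            INTERNAL_RT_USE_COMPRESSED_OOPS, oop>::value  // true
  //   // A primitive value is never converted.
  //   MustConvertCompressedOop<INTERNAL_EMPTY, jint>::value                 // false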

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    typedef typename Conditional<
      HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
      typename HeapOopType<decorators>::type, T>::type type;
  };
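
  // Illustrative sketch (not compiled): EncodedType names the type actually
  // laid out in memory for the access; the decorator sets are hypothetical.
  //
  //   // An oop on a compressed-oops heap is encoded as narrowOop:
  //   EncodedType<INTERNAL_VALUE_IS_OOP |
  //               INTERNAL_CONVERT_COMPRESSED_OOP |
  //               INTERNAL_RT_USE_COMPRESSED_OOPS, oop>::type  // narrowOop
  //   // A primitive type passes through unchanged:
  //   EncodedType<INTERNAL_EMPTY, jint>::type                  // jint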

  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }
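
  // Illustrative sketch (not compiled): the returned pointer is typed to the
  // heap oop representation, so dereferencing it reads or writes the encoded
  // value directly. The offset below is a hypothetical example.
  //
  //   typename HeapOopType<decorators>::type* p =
  //     oop_field_addr<decorators>(base, 12); // field at byte offset 12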

  // This metafunction returns whether it is possible for a type T to require
  // locking to support wide atomics, i.e. atomic operations wider than the
  // platform handles natively.
  template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
  struct PossiblyLockedAccess: public IntegralConstant<bool, false> {};
#else
  struct PossiblyLockedAccess: public IntegralConstant<bool, (sizeof(T) > 4)> {};
#endif
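
  // Illustrative sketch (not compiled): without SUPPORTS_NATIVE_CX8 (no native
  // 8-byte compare-and-exchange), 8-byte values may need the locked path.
  //
  //   PossiblyLockedAccess<jlong>::value // true when !SUPPORTS_NATIVE_CX8
  //   PossiblyLockedAccess<jint>::value  // always false (4 bytes or fewer)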

  // Function pointer types used when dispatching barriers; the void
  // specialization is used for type-erased arraycopy.
  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
    typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
    typedef T (*atomic_xchg_func_t)(T new_value, void* addr);

    typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    typedef oop (*resolve_func_t)(oop obj);
  };

  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, void* src, void* dst, size_t length);
  };

  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

  // Maps a BarrierType value to the matching function pointer type from
  // AccessFunctionTypes.
#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic {             \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_RESOLVE, resolve_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION
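
  // For reference, each macro invocation above expands to a specialization of
  // this shape, so that e.g. AccessFunction<decorators, T, BARRIER_LOAD>::type
  // names AccessFunctionTypes<decorators, T>::load_func_t:
  //
  //   template <DecoratorSet decorators, typename T>
  //   struct AccessFunction<decorators, T, BARRIER_LOAD>: AllStatic {
  //     typedef typename AccessFunctionTypes<decorators, T>::load_func_t type;
  //   };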

  // Resolves the barrier function pointer to call for the given barrier type
  // and decorator set, for primitive and oop values respectively.
  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  // RAII lock used when wide atomic accesses have to fall back to a mutex.
  class AccessLocker {
  public:
    AccessLocker();
    ~AccessLocker();
  };
  bool wide_atomic_needs_locking();

  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently this header is included.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
}

// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | OOP_DECORATOR_MASK;
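
// Illustrative sketch (not compiled): stripping irrelevant decorators before
// entering the raw layer is a plain bitwise mask; the decorator set below is
// a hypothetical example.
//
//   const DecoratorSet raw_decorators = decorators & RAW_DECORATOR_MASK;
//   // Heap-placement decorators such as IN_HEAP are masked away here.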

// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence is not completely "raw"
// strictly speaking.
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }
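
  // Illustrative sketch (not compiled): EnableIf selects exactly one
  // encode_internal/decode_internal overload per instantiation. For an oop
  // access on a compressed-oops heap, encode() narrows the oop and decode()
  // widens it back; for all other accesses both are identity functions.
  //
  //   narrowOop e = encode(some_oop); // converting overload is selected
  //   oop       d = decode<oop>(e);   // widens back to an uncompressed oop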

protected:
  // Exactly one of the following overloads is enabled for a given decorator
  // set, selected by its memory ordering decorator (MO_*). The variants
  // declared but not defined here are implemented out of line using
  // OrderAccess/Atomic.
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<const volatile T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<const T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value>::type
  store_internal(void* addr, T value) {
    (void)const_cast<T&>(*reinterpret_cast<volatile T*>(addr) = value);
  }

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(T new_value, void* addr);

  // The following *_maybe_locked mechanisms handle atomic operations that are
  // wider than the machine supports natively, possibly falling back to a
  // slower mutex-based path to perform the operation.
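
  // Illustrative sketch (not compiled): the overload choice is made at compile
  // time from PossiblyLockedAccess<T>. A jlong cmpxchg on a platform without
  // native wide atomics resolves to the potentially locked variant; a jint
  // cmpxchg always resolves to the lock-free variant.
  //
  //   jlong v = atomic_cmpxchg_maybe_locked<ds>(new_val, addr, old_val);
  //   // The locked variant still checks wide_atomic_needs_locking() at
  //   // runtime and takes AccessLocker only when actually required.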

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr) {
    return atomic_xchg_internal<ds>(new_value, addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr);

public:
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(new_value, addr, compare_value);
  }

  template <typename T>
  static inline T atomic_xchg(T new_value, void* addr) {
    return atomic_xchg_maybe_locked<decorators>(new_value, addr);
  }

  template <typename T>
  static bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);

  // The oop_* variants encode/decode between oop and the (possibly
  // compressed) heap representation around the underlying raw access.
  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(T new_value, void* addr, T compare_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);

  template <typename T>
  static T oop_atomic_xchg(T new_value, void* addr);
  template <typename T>
  static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);

  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    return atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
  }

  template <typename T>
  static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    return atomic_xchg(new_value, field_addr(base, offset));
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);

  static void clone(oop src, oop dst, size_t size);

  static oop resolve(oop obj) { return obj; }
};
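
// Illustrative sketch (not compiled): a relaxed raw load of a jint field at a
// hypothetical byte offset; the names below are examples, not part of this
// file's API.
//
//   typedef RawAccessBarrier<MO_RELAXED> Raw;
//   jint value = Raw::load_at<jint>(obj, offset_in_bytes);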

#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_HPP