/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ACCESSBACKEND_HPP
#define SHARE_VM_RUNTIME_ACCESSBACKEND_HPP

#include "metaprogramming/conditional.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/integralConstant.hpp"
#include "metaprogramming/isSame.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// This metafunction returns either oop or narrowOop depending on whether
// an access needs to use compressed oops or not.
template <DecoratorSet decorators>
struct HeapOopType: AllStatic {
  static const bool needs_oop_compress = HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value &&
                                         HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value;
  typedef typename Conditional<needs_oop_compress, narrowOop, oop>::type type;
};
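
// For illustration only: with both compression-related decorators set, the
// metafunction selects narrowOop; otherwise it falls back to oop. A minimal
// sketch (these STATIC_ASSERTs are illustrative, not part of this file, and
// assume the INTERNAL_EMPTY decorator constant from access.hpp):
//
//   STATIC_ASSERT((IsSame<HeapOopType<INTERNAL_CONVERT_COMPRESSED_OOP |
//                                     INTERNAL_RT_USE_COMPRESSED_OOPS>::type,
//                         narrowOop>::value));
//   STATIC_ASSERT((IsSame<HeapOopType<INTERNAL_EMPTY>::type, oop>::value));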

namespace AccessInternal {
  enum BarrierType {
    BARRIER_STORE,
    BARRIER_STORE_AT,
    BARRIER_LOAD,
    BARRIER_LOAD_AT,
    BARRIER_ATOMIC_CMPXCHG,
    BARRIER_ATOMIC_CMPXCHG_AT,
    BARRIER_ATOMIC_XCHG,
    BARRIER_ATOMIC_XCHG_AT,
    BARRIER_ARRAYCOPY,
    BARRIER_CLONE,
    BARRIER_RESOLVE,
    BARRIER_EQUALS
  };

  template <DecoratorSet decorators, typename T>
  struct MustConvertCompressedOop: public IntegralConstant<bool,
    HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value &&
    IsSame<typename HeapOopType<decorators>::type, narrowOop>::value &&
    IsSame<T, oop>::value> {};
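
  // Put differently (a hedged reading, for illustration): the conversion is
  // needed precisely when the passed-in type T is exactly oop, the value is
  // decorated as an oop (INTERNAL_VALUE_IS_OOP), and the heap representation
  // for this decorator set is narrowOop, i.e. compressed oops are in use.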

  // This metafunction returns an appropriate oop type if the value is oop-like
  // and otherwise returns the same type T.
  template <DecoratorSet decorators, typename T>
  struct EncodedType: AllStatic {
    typedef typename Conditional<
      HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
      typename HeapOopType<decorators>::type, T>::type type;
  };
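
  // For illustration: when compressed oops are in use,
  //   EncodedType<INTERNAL_VALUE_IS_OOP | INTERNAL_CONVERT_COMPRESSED_OOP |
  //               INTERNAL_RT_USE_COMPRESSED_OOPS, oop>::type
  // is narrowOop, while for a primitive value such as jint,
  //   EncodedType<INTERNAL_EMPTY, jint>::type
  // is simply jint (again assuming the INTERNAL_EMPTY constant from access.hpp).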

  template <DecoratorSet decorators>
  inline typename HeapOopType<decorators>::type*
  oop_field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<typename HeapOopType<decorators>::type*>(
             reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  // This metafunction returns whether it is possible for a type T to require
  // locking in order to support wide atomic operations.
  template <typename T>
#ifdef SUPPORTS_NATIVE_CX8
  struct PossiblyLockedAccess: public IntegralConstant<bool, false> {};
#else
  struct PossiblyLockedAccess: public IntegralConstant<bool, (sizeof(T) > 4)> {};
#endif
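
  // For illustration: platforms defining SUPPORTS_NATIVE_CX8 provide a native
  // 8-byte compare-and-exchange, so no type needs lock-based atomics there.
  // Without it, any type wider than 4 bytes may take the locked fallback:
  //
  //   PossiblyLockedAccess<jint>::value   // false on every platform
  //   PossiblyLockedAccess<jlong>::value  // true when SUPPORTS_NATIVE_CX8
  //                                       // is not defined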

  template <DecoratorSet decorators, typename T>
  struct AccessFunctionTypes {
    typedef T (*load_at_func_t)(oop base, ptrdiff_t offset);
    typedef void (*store_at_func_t)(oop base, ptrdiff_t offset, T value);
    typedef T (*atomic_cmpxchg_at_func_t)(T new_value, oop base, ptrdiff_t offset, T compare_value);
    typedef T (*atomic_xchg_at_func_t)(T new_value, oop base, ptrdiff_t offset);

    typedef T (*load_func_t)(void* addr);
    typedef void (*store_func_t)(void* addr, T value);
    typedef T (*atomic_cmpxchg_func_t)(T new_value, void* addr, T compare_value);
    typedef T (*atomic_xchg_func_t)(T new_value, void* addr);

    typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);
    typedef void (*clone_func_t)(oop src, oop dst, size_t size);
    typedef oop (*resolve_func_t)(oop obj);
    typedef bool (*equals_func_t)(oop o1, oop o2);
  };

  template <DecoratorSet decorators>
  struct AccessFunctionTypes<decorators, void> {
    typedef bool (*arraycopy_func_t)(arrayOop src_obj, arrayOop dst_obj, void* src, void* dst, size_t length);
  };

  template <DecoratorSet decorators, typename T, BarrierType barrier> struct AccessFunction {};

#define ACCESS_GENERATE_ACCESS_FUNCTION(bt, func)                   \
  template <DecoratorSet decorators, typename T>                    \
  struct AccessFunction<decorators, T, bt>: AllStatic {             \
    typedef typename AccessFunctionTypes<decorators, T>::func type; \
  }
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE, store_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_STORE_AT, store_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD, load_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_LOAD_AT, load_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG, atomic_cmpxchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_CMPXCHG_AT, atomic_cmpxchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG, atomic_xchg_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ATOMIC_XCHG_AT, atomic_xchg_at_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_ARRAYCOPY, arraycopy_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_CLONE, clone_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_RESOLVE, resolve_func_t);
  ACCESS_GENERATE_ACCESS_FUNCTION(BARRIER_EQUALS, equals_func_t);
#undef ACCESS_GENERATE_ACCESS_FUNCTION
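
  // For illustration, the BARRIER_LOAD instantiation above expands to:
  //
  //   template <DecoratorSet decorators, typename T>
  //   struct AccessFunction<decorators, T, BARRIER_LOAD>: AllStatic {
  //     typedef typename AccessFunctionTypes<decorators, T>::load_func_t type;
  //   };
  //
  // i.e. each BarrierType tag maps to the matching function pointer type.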

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_barrier();

  template <DecoratorSet decorators, typename T, BarrierType barrier_type>
  typename AccessFunction<decorators, T, barrier_type>::type resolve_oop_barrier();

  class AccessLocker {
  public:
    AccessLocker();
    ~AccessLocker();
  };
  bool wide_atomic_needs_locking();

  void* field_addr(oop base, ptrdiff_t offset);

  // Forward calls to Copy:: in the cpp file to reduce dependencies and allow
  // faster build times, given how frequently access is included.
  void arraycopy_arrayof_conjoint_oops(void* src, void* dst, size_t length);
  void arraycopy_conjoint_oops(oop* src, oop* dst, size_t length);
  void arraycopy_conjoint_oops(narrowOop* src, narrowOop* dst, size_t length);

  void arraycopy_disjoint_words(void* src, void* dst, size_t length);
  void arraycopy_disjoint_words_atomic(void* src, void* dst, size_t length);

  template<typename T>
  void arraycopy_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_arrayof_conjoint(T* src, T* dst, size_t length);
  template<typename T>
  void arraycopy_conjoint_atomic(T* src, T* dst, size_t length);
}

// This mask specifies what decorators are relevant for raw accesses. When passing
// accesses to the raw layer, irrelevant decorators are removed.
const DecoratorSet RAW_DECORATOR_MASK = INTERNAL_DECORATOR_MASK | MO_DECORATOR_MASK |
                                        ARRAYCOPY_DECORATOR_MASK | OOP_DECORATOR_MASK;

// The RawAccessBarrier performs raw accesses with additional knowledge of
// memory ordering, so that OrderAccess/Atomic is called when necessary.
// It additionally handles compressed oops, and hence, strictly speaking,
// is not completely "raw".
template <DecoratorSet decorators>
class RawAccessBarrier: public AllStatic {
protected:
  static inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return AccessInternal::field_addr(base, byte_offset);
  }

protected:
  // Only encode if INTERNAL_VALUE_IS_OOP
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value,
    typename HeapOopType<idecorators>::type>::type
  encode_internal(T value);

  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  encode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline typename AccessInternal::EncodedType<decorators, T>::type
  encode(T value) {
    return encode_internal<decorators, T>(value);
  }

  // Only decode if INTERNAL_VALUE_IS_OOP
  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(typename HeapOopType<idecorators>::type value);

  template <DecoratorSet idecorators, typename T>
  static inline typename EnableIf<
    !AccessInternal::MustConvertCompressedOop<idecorators, T>::value, T>::type
  decode_internal(T value) {
    return value;
  }

  template <typename T>
  static inline T decode(typename AccessInternal::EncodedType<decorators, T>::type value) {
    return decode_internal<decorators, T>(value);
  }
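
  // For illustration: with a decorator set implying compressed oops, encode()
  // narrows an oop to a narrowOop before a write and decode() widens it back
  // after a read; for every other combination both are compile-time identity
  // functions, selected via the EnableIf overloads above. A hedged sketch
  // (some_oop is a hypothetical value, not part of this file):
  //
  //   narrowOop n = encode(some_oop);  // compressing case
  //   oop o = decode<oop>(n);          // round-trips the original value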

protected:
  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_ACQUIRE>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  load_internal(void* addr);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<const volatile T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value, T>::type
  load_internal(void* addr) {
    return *reinterpret_cast<const T*>(addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELEASE>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value>::type
  store_internal(void* addr, T value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_VOLATILE>::value>::type
  store_internal(void* addr, T value) {
    (void)const_cast<T&>(*reinterpret_cast<volatile T*>(addr) = value);
  }

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    HasDecorator<ds, MO_UNORDERED>::value>::type
  store_internal(void* addr, T value) {
    *reinterpret_cast<T*>(addr) = value;
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_RELAXED>::value, T>::type
  atomic_cmpxchg_internal(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    HasDecorator<ds, MO_SEQ_CST>::value, T>::type
  atomic_xchg_internal(T new_value, void* addr);

  // The following *_maybe_locked mechanisms handle atomic operations that are
  // wider than the machine supports natively, possibly opting for a slower
  // path that takes a mutex to perform the operation.

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_internal<ds>(new_value, addr, compare_value);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_cmpxchg_maybe_locked(T new_value, void* addr, T compare_value);

  template <DecoratorSet ds, typename T>
  static inline typename EnableIf<
    !AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr) {
    return atomic_xchg_internal<ds>(new_value, addr);
  }

  template <DecoratorSet ds, typename T>
  static typename EnableIf<
    AccessInternal::PossiblyLockedAccess<T>::value, T>::type
  atomic_xchg_maybe_locked(T new_value, void* addr);
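
  // For illustration, a hedged sketch of what the locked cmpxchg slow path
  // could look like (the real definitions live outside this header), using
  // the AccessLocker and wide_atomic_needs_locking() facilities declared
  // above in AccessInternal:
  //
  //   if (AccessInternal::wide_atomic_needs_locking()) {
  //     AccessInternal::AccessLocker access_lock; // RAII lock for wide atomics
  //     T old_value = *reinterpret_cast<T*>(addr);
  //     if (old_value == compare_value) {
  //       *reinterpret_cast<T*>(addr) = new_value;
  //     }
  //     return old_value;
  //   }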

public:
  template <typename T>
  static inline void store(void* addr, T value) {
    store_internal<decorators>(addr, value);
  }

  template <typename T>
  static inline T load(void* addr) {
    return load_internal<decorators, T>(addr);
  }

  template <typename T>
  static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
    return atomic_cmpxchg_maybe_locked<decorators>(new_value, addr, compare_value);
  }

  template <typename T>
  static inline T atomic_xchg(T new_value, void* addr) {
    return atomic_xchg_maybe_locked<decorators>(new_value, addr);
  }

  template <typename T>
  static bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);

  template <typename T>
  static void oop_store(void* addr, T value);
  template <typename T>
  static void oop_store_at(oop base, ptrdiff_t offset, T value);

  template <typename T>
  static T oop_load(void* addr);
  template <typename T>
  static T oop_load_at(oop base, ptrdiff_t offset);

  template <typename T>
  static T oop_atomic_cmpxchg(T new_value, void* addr, T compare_value);
  template <typename T>
  static T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);

  template <typename T>
  static T oop_atomic_xchg(T new_value, void* addr);
  template <typename T>
  static T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);

  template <typename T>
  static void store_at(oop base, ptrdiff_t offset, T value) {
    store(field_addr(base, offset), value);
  }

  template <typename T>
  static T load_at(oop base, ptrdiff_t offset) {
    return load<T>(field_addr(base, offset));
  }

  template <typename T>
  static T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    return atomic_cmpxchg(new_value, field_addr(base, offset), compare_value);
  }

  template <typename T>
  static T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    return atomic_xchg(new_value, field_addr(base, offset));
  }

  template <typename T>
  static bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length);

  static void clone(oop src, oop dst, size_t size);

  static oop resolve(oop obj) { return obj; }

  static bool equals(oop o1, oop o2) { return o1 == o2; }
};
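
// Usage, for illustration only (a hedged sketch; in practice decorator sets
// are composed and expanded by the Access API in access.hpp rather than
// spelled out by callers; addr, base, offset and v are hypothetical):
//
//   jint v = RawAccessBarrier<MO_RELAXED>::load<jint>(addr);
//   RawAccessBarrier<MO_RELEASE>::store_at(base, offset, v);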

#endif // SHARE_VM_RUNTIME_ACCESSBACKEND_HPP