/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ACCESS_HPP
#define SHARE_VM_RUNTIME_ACCESS_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/decay.hpp"
#include "metaprogramming/integralConstant.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// = GENERAL =
// Access is an API for performing accesses with declarative semantics. Each access can have a number of "decorators".
// A decorator is an attribute or property that affects the way a memory access is performed in some way.
// There are different groups of decorators. Some have to do with memory ordering, others with,
// e.g., the strength of references, the strength of GC barriers, or whether compression should be applied or not.
// Some decorators are set at build time, such as whether primitives require GC barriers or not, others
// at call sites, such as whether an access is in the heap or not, and others are resolved at runtime,
// such as GC-specific barriers and encoding/decoding compressed oops.
// By pipelining the handling of these decorators, the design of the Access API allows the different
// orthogonal concerns of decorators to be handled separately, while providing a powerful way of
// expressing these semantic properties in a unified way.

// == OPERATIONS ==
// * load: Load a value from an address.
// * load_at: Load a value from an internal pointer relative to a base object.
// * store: Store a value at an address.
// * store_at: Store a value in an internal pointer relative to a base object.
// * atomic_cmpxchg: Atomically compare-and-swap a new value at an address if previous value matched the compared value.
// * atomic_cmpxchg_at: Atomically compare-and-swap a new value at an internal pointer address if previous value matched the compared value.
// * atomic_xchg: Atomically swap a new value at an address, returning the previous value.
// * atomic_xchg_at: Atomically swap a new value at an internal pointer address, returning the previous value.
// * arraycopy: Copy data from one heap array to another heap array.
// * clone: Clone the contents of an object to a newly allocated object.
// * resolve: Resolve a stable to-space invariant oop that is guaranteed not to relocate its payload until a subsequent thread transition.
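
// As an illustrative sketch only (obj and value_offset below are hypothetical placeholders),
// a typical decorated heap access looks like:
//
//   jint v = HeapAccess<MO_RELAXED>::load_at(obj, value_offset);
//   HeapAccess<MO_RELAXED>::store_at(obj, value_offset, v + 1);
//
// The decorators select the memory ordering and the IN_HEAP location, while the GC-specific
// barriers are resolved behind the scenes (see the HeapAccess helper at the end of this file).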

typedef uint64_t DecoratorSet;

// == Internal Decorators - do not use ==
// * INTERNAL_EMPTY: This is the name for the empty decorator set (in absence of other decorators).
// * INTERNAL_CONVERT_COMPRESSED_OOP: This is an oop access that will require converting an oop
//   to a narrowOop or vice versa, if UseCompressedOops is known to be set.
// * INTERNAL_VALUE_IS_OOP: Remember that the involved access is on an oop rather than on a primitive.
const DecoratorSet INTERNAL_EMPTY                    = UCONST64(0);
const DecoratorSet INTERNAL_CONVERT_COMPRESSED_OOP   = UCONST64(1) << 1;
const DecoratorSet INTERNAL_VALUE_IS_OOP             = UCONST64(1) << 2;

// == Internal build-time Decorators ==
// * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
// * INTERNAL_BT_TO_SPACE_INVARIANT: This is set in the barrierSetConfig.hpp file iff
//   no GC that is not to-space invariant is bundled in the build.
const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
const DecoratorSet INTERNAL_BT_TO_SPACE_INVARIANT    = UCONST64(1) << 4;

// == Internal run-time Decorators ==
// * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
//   access backends iff UseCompressedOops is true.
const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS   = UCONST64(1) << 5;

const DecoratorSet INTERNAL_DECORATOR_MASK           = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
                                                       INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;

// == Memory Ordering Decorators ==
// The memory ordering decorators can be described in the following way:
// === Decorator Rules ===
// The different types of memory ordering guarantees have a strict order of strength.
// Explicitly specifying the stronger ordering implies that the guarantees of the weaker
// property hold too. The names come from the C++11 atomic operations, and typically
// have a JMM equivalent property.
// The equivalence may be viewed like this:
// MO_UNORDERED is equivalent to JMM plain.
// MO_VOLATILE has no equivalence in JMM, because it's a C++ thing.
// MO_RELAXED is equivalent to JMM opaque.
// MO_ACQUIRE is equivalent to JMM acquire.
// MO_RELEASE is equivalent to JMM release.
// MO_SEQ_CST is equivalent to JMM volatile.
//
// === Stores ===
//  * MO_UNORDERED (Default): No guarantees.
//    - The compiler and hardware are free to reorder aggressively. And they will.
//  * MO_VOLATILE: Volatile stores (in the C++ sense).
//    - The stores are not reordered by the compiler (but possibly by the hardware) w.r.t. other
//      volatile accesses in program order (they may still be reordered w.r.t. non-volatile accesses).
//  * MO_RELAXED: Relaxed atomic stores.
//    - The stores are atomic.
//    - Guarantees from volatile stores hold.
//  * MO_RELEASE: Releasing stores.
//    - The releasing store will make its preceding memory accesses observable to memory accesses
//      subsequent to an acquiring load observing this releasing store.
//    - Guarantees from relaxed stores hold.
//  * MO_SEQ_CST: Sequentially consistent stores.
//    - The stores are observed in the same order by MO_SEQ_CST loads on other processors
//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
//    - Guarantees from releasing stores hold.
// === Loads ===
//  * MO_UNORDERED (Default): No guarantees
//    - The compiler and hardware are free to reorder aggressively. And they will.
//  * MO_VOLATILE: Volatile loads (in the C++ sense).
//    - The loads are not reordered by the compiler (but possibly by the hardware) w.r.t. other
//      volatile accesses in program order (they may still be reordered w.r.t. non-volatile accesses).
//  * MO_RELAXED: Relaxed atomic loads.
//    - The loads are atomic.
//    - Guarantees from volatile loads hold.
//  * MO_ACQUIRE: Acquiring loads.
//    - An acquiring load will make subsequent memory accesses observe the memory accesses
//      preceding the releasing store that the acquiring load observed.
//    - Guarantees from relaxed loads hold.
//  * MO_SEQ_CST: Sequentially consistent loads.
//    - These loads observe MO_SEQ_CST stores in the same order on other processors
//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
//    - Guarantees from acquiring loads hold.
// === Atomic Cmpxchg ===
//  * MO_RELAXED: Atomic but relaxed cmpxchg.
//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold unconditionally.
//  * MO_SEQ_CST: Sequentially consistent cmpxchg.
//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold unconditionally.
// === Atomic Xchg ===
//  * MO_RELAXED: Atomic but relaxed atomic xchg.
//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold.
//  * MO_SEQ_CST: Sequentially consistent xchg.
//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold.
const DecoratorSet MO_UNORDERED      = UCONST64(1) << 6;
const DecoratorSet MO_VOLATILE       = UCONST64(1) << 7;
const DecoratorSet MO_RELAXED        = UCONST64(1) << 8;
const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 9;
const DecoratorSet MO_RELEASE        = UCONST64(1) << 10;
const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 11;
const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
                                       MO_ACQUIRE | MO_RELEASE | MO_SEQ_CST;
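
// For illustration only (obj and flag_offset are hypothetical placeholders), a release/acquire
// pair on a heap field could be expressed as:
//
//   HeapAccess<MO_RELEASE>::store_at(obj, flag_offset, (jint)1);   // publishing store
//   jint flag = HeapAccess<MO_ACQUIRE>::load_at(obj, flag_offset); // acquiring load
//
// Since the stronger orderings imply the guarantees of the weaker ones, MO_SEQ_CST accesses
// also provide acquire/release semantics.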

// === Barrier Strength Decorators ===
// * AS_RAW: The access will translate into a raw memory access, hence ignoring all semantic concerns
//   except memory ordering and compressed oops. This will bypass runtime function pointer dispatching
//   in the pipeline and hardwire to raw accesses without going through the GC access barriers.
//  - Accesses on oop* translate to raw memory accesses without runtime checks
//  - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
//  - Accesses on HeapWord* translate to a runtime check choosing one of the above
//  - Accesses on other types translate to raw memory accesses without runtime checks
// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
//   marking that the previous value is uninitialized nonsense rather than a real value.
// * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
//   alive, regardless of the type of reference being accessed. It will however perform the memory access
//   in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is being accessed,
//   or maintain, e.g. intergenerational or interregional pointers if applicable. This should be used with
//   extreme caution in isolated scopes.
// * AS_NORMAL: The accesses will be resolved to an accessor on the BarrierSet class, giving the
//   GC the responsibility of performing the access and of choosing which barriers to apply. This is the default.
//   Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
//   decorator for enabling primitive barriers is enabled for the build.
const DecoratorSet AS_RAW                  = UCONST64(1) << 12;
const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 13;
const DecoratorSet AS_NO_KEEPALIVE         = UCONST64(1) << 14;
const DecoratorSet AS_NORMAL               = UCONST64(1) << 15;
const DecoratorSet AS_DECORATOR_MASK       = AS_RAW | AS_DEST_NOT_INITIALIZED |
                                             AS_NO_KEEPALIVE | AS_NORMAL;
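
// As a sketch (some_field is a hypothetical placeholder), a raw access that bypasses the GC
// access barriers can be expressed either by passing AS_RAW explicitly or by using the
// RawAccess helper defined at the end of this file:
//
//   Access<AS_RAW>::store(&some_field, (jint)42);
//   RawAccess<>::store(&some_field, (jint)42);    // equivalent shorthand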

// === Reference Strength Decorators ===
// These decorators only apply to accesses on oop-like types (oop/narrowOop).
// * ON_STRONG_OOP_REF: Memory access is performed on a strongly reachable reference.
// * ON_WEAK_OOP_REF: The memory access is performed on a weakly reachable reference.
// * ON_PHANTOM_OOP_REF: The memory access is performed on a phantomly reachable reference.
//   This is the same ring of strength as jweak and weak oops in the VM.
// * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
//   This could for example come from the unsafe API.
// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 16;
const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 17;
const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 18;
const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 19;
const DecoratorSet ON_DECORATOR_MASK  = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
                                        ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
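
// For example (ref and referent_offset are hypothetical placeholders, illustration only),
// loading the referent of a weak reference without keeping it alive could be decorated as:
//
//   oop referent = HeapAccess<ON_WEAK_OOP_REF | AS_NO_KEEPALIVE>::oop_load_at(ref, referent_offset);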

// === Access Location ===
// Accesses can take place in, e.g., the heap, the old or young generation, and different native roots.
// The location is important to the GC as it may imply different actions. The following decorators are used:
// * IN_HEAP: The access is performed in the heap. Many barriers such as card marking will
//   be omitted if this decorator is not set.
// * IN_HEAP_ARRAY: The access is performed on a heap allocated array. This is sometimes a special case
//   for some GCs, and implies that it is an IN_HEAP.
// * IN_ROOT: The access is performed in an off-heap data structure pointing into the Java heap.
// * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
//   but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
//   implies that it is also an IN_ROOT.
const DecoratorSet IN_HEAP            = UCONST64(1) << 20;
const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 21;
const DecoratorSet IN_ROOT            = UCONST64(1) << 22;
const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 23;
const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 24;
const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
                                        IN_ROOT | IN_CONCURRENT_ROOT |
                                        IN_ARCHIVE_ROOT;
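
// As a sketch (_some_native_root is a hypothetical placeholder), an access to an oop held in
// a native data structure outside the heap uses a root location, typically via the RootAccess
// helper defined at the end of this file:
//
//   oop obj = RootAccess<>::oop_load(&_some_native_root);
//   RootAccess<IN_CONCURRENT_ROOT>::oop_store(&_some_native_root, obj);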

// == Value Decorators ==
// * OOP_NOT_NULL: This property can make certain barriers faster such as compressing oops.
const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 25;
const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;
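
// For instance (obj, field_offset and value are hypothetical placeholders, illustration only),
// a store of an oop that is known to be non-null can be decorated so that barrier and
// compression code may skip its null checks:
//
//   HeapAccess<OOP_NOT_NULL>::oop_store_at(obj, field_offset, value);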

// == Arraycopy Decorators ==
// * ARRAYCOPY_CHECKCAST: This property means that the classes of the objects in the source
//   array are not guaranteed to be subclasses of the class of the destination array. This requires
//   a check-cast barrier during the copying operation. If this is not set, it is assumed
//   that the arrays are covariant (the source array type is-a destination array type).
// * ARRAYCOPY_DISJOINT: This property means that it is known that the two array ranges
//   are disjoint.
// * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
// * ARRAYCOPY_ATOMIC: The accesses have to be atomic over the size of its elements.
// * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord.
const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 26;
const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 27;
const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 28;
const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 29;
const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 30;
const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
                                                    ARRAYCOPY_ARRAYOF | ARRAYCOPY_ATOMIC |
                                                    ARRAYCOPY_ALIGNED;
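
// As a sketch (all arguments are hypothetical placeholders), a disjoint, element-atomic copy
// between two heap arrays could be expressed as:
//
//   HeapAccess<ARRAYCOPY_DISJOINT | ARRAYCOPY_ATOMIC>::arraycopy(src_obj, dst_obj, src, dst, length);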

// The HasDecorator trait can help determine at compile time whether a decorator set
// has an intersection with a certain other decorator set.
template <DecoratorSet decorators, DecoratorSet decorator>
struct HasDecorator: public IntegralConstant<bool, (decorators & decorator) != 0> {};
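
// For example (backend_store is a hypothetical function, sketch only), an access backend can
// use HasDecorator to branch statically on a decorator:
//
//   template <DecoratorSet decorators>
//   void backend_store(volatile jint* addr, jint value) {
//     if (HasDecorator<decorators, MO_SEQ_CST>::value) {
//       // emit a sequentially consistent store
//     } else {
//       // emit a plain store
//     }
//   }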

namespace AccessInternal {
  template <typename T>
  struct OopOrNarrowOopInternal: AllStatic {
    typedef oop type;
  };

  template <>
  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
    typedef narrowOop type;
  };

  // This metafunction returns a canonicalized oop/narrowOop type for an oop-like
  // type passed in from the oop_* overloads, where the user has sworn that the
  // passed in values should be oop-like (e.g. oop, oopDesc*, arrayOop,
  // narrowOop, instanceOopDesc*, and random other things).
  // In the oop_* overloads, it must hold that if the passed in type T is not
  // narrowOop, then it by contract has to be one of many oop-like types implicitly
  // convertible to oop, and hence returns oop as the canonical oop type.
  // If it turns out it was not, then the implicit conversion to oop will fail
  // to compile, as desired.
  template <typename T>
  struct OopOrNarrowOop: AllStatic {
    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
  };

  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  template <DecoratorSet decorators, typename T>
  void store_at(oop base, ptrdiff_t offset, T value);

  template <DecoratorSet decorators, typename T>
  T load_at(oop base, ptrdiff_t offset);

  template <DecoratorSet decorators, typename T>
  T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);

  template <DecoratorSet decorators, typename T>
  T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);

  template <DecoratorSet decorators, typename P, typename T>
  void store(P* addr, T value);

  template <DecoratorSet decorators, typename P, typename T>
  T load(P* addr);

  template <DecoratorSet decorators, typename P, typename T>
  T atomic_cmpxchg(T new_value, P* addr, T compare_value);

  template <DecoratorSet decorators, typename P, typename T>
  T atomic_xchg(T new_value, P* addr);

  template <DecoratorSet decorators, typename T>
  bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length);

  template <DecoratorSet decorators>
  void clone(oop src, oop dst, size_t size);

  template <DecoratorSet decorators>
  oop resolve(oop src);

  // Infer the type that should be returned from a load.
  template <typename P, DecoratorSet decorators>
  class LoadProxy: public StackObj {
  private:
    P *const _addr;
  public:
    LoadProxy(P* addr) : _addr(addr) {}

    template <typename T>
    inline operator T() {
      return load<decorators, P, T>(_addr);
    }

    inline operator P() {
      return load<decorators, P, P>(_addr);
    }
  };

  // Infer the type that should be returned from a load_at.
  template <DecoratorSet decorators>
  class LoadAtProxy: public StackObj {
  private:
    const oop _base;
    const ptrdiff_t _offset;
  public:
    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    template <typename T>
    inline operator T() const {
      return load_at<decorators, T>(_base, _offset);
    }
  };
}
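
// The proxy classes above let the result type of a load be inferred from the call site.
// As an illustration only (the offsets below are hypothetical placeholders), both of the
// following resolve to a decorated load of the type on the left-hand side:
//
//   jbyte b = HeapAccess<>::load_at(obj, byte_field_offset);    // T deduced as jbyte
//   oop o   = HeapAccess<>::oop_load_at(obj, oop_field_offset); // T deduced as oop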

template <DecoratorSet decorators = INTERNAL_EMPTY>
class Access: public AllStatic {
  // This function asserts that if an access gets passed in a decorator outside
  // of the expected_decorators, then something is wrong. It additionally checks
  // the consistency of the decorators so that supposedly disjoint decorators are indeed
  // disjoint. For example, an access cannot be both in the heap and in a root at the
  // same time.
  template <DecoratorSet expected_decorators>
  static void verify_decorators();

  template <DecoratorSet expected_mo_decorators>
  static void verify_primitive_decorators() {
    const DecoratorSet primitive_decorators = (AS_DECORATOR_MASK ^ AS_NO_KEEPALIVE ^ AS_DEST_NOT_INITIALIZED) |
                                              IN_HEAP | IN_HEAP_ARRAY;
    verify_decorators<expected_mo_decorators | primitive_decorators>();
  }

  template <DecoratorSet expected_mo_decorators>
  static void verify_oop_decorators() {
    const DecoratorSet oop_decorators = AS_DECORATOR_MASK | IN_DECORATOR_MASK |
                                        (ON_DECORATOR_MASK ^ ON_UNKNOWN_OOP_REF) | // no unknown oop refs outside of the heap
                                        OOP_DECORATOR_MASK;
    verify_decorators<expected_mo_decorators | oop_decorators>();
  }

  template <DecoratorSet expected_mo_decorators>
  static void verify_heap_oop_decorators() {
    const DecoratorSet heap_oop_decorators = AS_DECORATOR_MASK | ON_DECORATOR_MASK |
                                             OOP_DECORATOR_MASK | (IN_DECORATOR_MASK ^
                                                                   (IN_ROOT | IN_CONCURRENT_ROOT)); // no root accesses in the heap
    verify_decorators<expected_mo_decorators | heap_oop_decorators>();
  }

  static const DecoratorSet load_mo_decorators = MO_UNORDERED | MO_VOLATILE | MO_RELAXED | MO_ACQUIRE | MO_SEQ_CST;
  static const DecoratorSet store_mo_decorators = MO_UNORDERED | MO_VOLATILE | MO_RELAXED | MO_RELEASE | MO_SEQ_CST;
  static const DecoratorSet atomic_xchg_mo_decorators = MO_SEQ_CST;
  static const DecoratorSet atomic_cmpxchg_mo_decorators = MO_RELAXED | MO_SEQ_CST;

public:
  // Primitive heap accesses
  static inline AccessInternal::LoadAtProxy<decorators> load_at(oop base, ptrdiff_t offset) {
    verify_primitive_decorators<load_mo_decorators>();
    return AccessInternal::LoadAtProxy<decorators>(base, offset);
  }

  template <typename T>
  static inline void store_at(oop base, ptrdiff_t offset, T value) {
    verify_primitive_decorators<store_mo_decorators>();
    AccessInternal::store_at<decorators>(base, offset, value);
  }

  template <typename T>
  static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    verify_primitive_decorators<atomic_cmpxchg_mo_decorators>();
    return AccessInternal::atomic_cmpxchg_at<decorators>(new_value, base, offset, compare_value);
  }

  template <typename T>
  static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    verify_primitive_decorators<atomic_xchg_mo_decorators>();
    return AccessInternal::atomic_xchg_at<decorators>(new_value, base, offset);
  }

  template <typename T>
  static inline void arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
    verify_decorators<ARRAYCOPY_DECORATOR_MASK | IN_HEAP |
                      AS_DECORATOR_MASK>();
    AccessInternal::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
  }

  // Oop heap accesses
  static inline AccessInternal::LoadAtProxy<decorators | INTERNAL_VALUE_IS_OOP> oop_load_at(oop base, ptrdiff_t offset) {
    verify_heap_oop_decorators<load_mo_decorators>();
    return AccessInternal::LoadAtProxy<decorators | INTERNAL_VALUE_IS_OOP>(base, offset);
  }

  template <typename T>
  static inline void oop_store_at(oop base, ptrdiff_t offset, T value) {
    verify_heap_oop_decorators<store_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType oop_value = value;
    AccessInternal::store_at<decorators | INTERNAL_VALUE_IS_OOP>(base, offset, oop_value);
  }

  template <typename T>
  static inline T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    verify_heap_oop_decorators<atomic_cmpxchg_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType new_oop_value = new_value;
    OopType compare_oop_value = compare_value;
    return AccessInternal::atomic_cmpxchg_at<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, base, offset, compare_oop_value);
  }

  template <typename T>
  static inline T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    verify_heap_oop_decorators<atomic_xchg_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType new_oop_value = new_value;
    return AccessInternal::atomic_xchg_at<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, base, offset);
  }

  template <typename T>
  static inline bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
    verify_decorators<ARRAYCOPY_DECORATOR_MASK | IN_HEAP | AS_DECORATOR_MASK>();
    return AccessInternal::arraycopy<decorators | INTERNAL_VALUE_IS_OOP>(src_obj, dst_obj, src, dst, length);
  }

  // Clone an object from src to dst
  static inline void clone(oop src, oop dst, size_t size) {
    verify_decorators<IN_HEAP>();
    AccessInternal::clone<decorators>(src, dst, size);
  }

  // Primitive accesses
  template <typename P>
  static inline P load(P* addr) {
    verify_primitive_decorators<load_mo_decorators>();
    return AccessInternal::load<decorators, P, P>(addr);
  }

  template <typename P, typename T>
  static inline void store(P* addr, T value) {
    verify_primitive_decorators<store_mo_decorators>();
    AccessInternal::store<decorators>(addr, value);
  }

  template <typename P, typename T>
  static inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
    verify_primitive_decorators<atomic_cmpxchg_mo_decorators>();
    return AccessInternal::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
  }

  template <typename P, typename T>
  static inline T atomic_xchg(T new_value, P* addr) {
    verify_primitive_decorators<atomic_xchg_mo_decorators>();
    return AccessInternal::atomic_xchg<decorators>(new_value, addr);
  }

  // Oop accesses
  template <typename P>
  static inline AccessInternal::LoadProxy<P, decorators | INTERNAL_VALUE_IS_OOP> oop_load(P* addr) {
    verify_oop_decorators<load_mo_decorators>();
    return AccessInternal::LoadProxy<P, decorators | INTERNAL_VALUE_IS_OOP>(addr);
  }

  template <typename P, typename T>
  static inline void oop_store(P* addr, T value) {
    verify_oop_decorators<store_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType oop_value = value;
    AccessInternal::store<decorators | INTERNAL_VALUE_IS_OOP>(addr, oop_value);
  }

  template <typename P, typename T>
  static inline T oop_atomic_cmpxchg(T new_value, P* addr, T compare_value) {
    verify_oop_decorators<atomic_cmpxchg_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType new_oop_value = new_value;
    OopType compare_oop_value = compare_value;
    return AccessInternal::atomic_cmpxchg<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, addr, compare_oop_value);
  }

  template <typename P, typename T>
  static inline T oop_atomic_xchg(T new_value, P* addr) {
    verify_oop_decorators<atomic_xchg_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType new_oop_value = new_value;
    return AccessInternal::atomic_xchg<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, addr);
  }

  static oop resolve(oop obj) {
    verify_decorators<INTERNAL_EMPTY>();
    return AccessInternal::resolve<decorators>(obj);
  }
};

// Helper for performing raw accesses (knows only of memory ordering and
// atomicity decorators, as well as compressed oops)
template <DecoratorSet decorators = INTERNAL_EMPTY>
class RawAccess: public Access<AS_RAW | decorators> {};

// Helper for performing normal accesses on the heap. These accesses
// may resolve an accessor on a GC barrier set
template <DecoratorSet decorators = INTERNAL_EMPTY>
class HeapAccess: public Access<IN_HEAP | decorators> {};

// Helper for performing normal accesses in roots. These accesses
// may resolve an accessor on a GC barrier set
template <DecoratorSet decorators = INTERNAL_EMPTY>
class RootAccess: public Access<IN_ROOT | decorators> {};
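
// Putting it together, a sketch of typical call sites (all names and offsets below are
// hypothetical placeholders, for illustration only):
//
//   jlong v = RawAccess<MO_VOLATILE>::load(&_some_counter);               // raw, bypasses GC barriers
//   HeapAccess<MO_SEQ_CST>::oop_store_at(holder, field_offset, new_obj);  // GC-aware heap store
//   oop root = RootAccess<ON_PHANTOM_OOP_REF>::oop_load(&_some_weak_root); // decorated root load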

#endif // SHARE_VM_RUNTIME_ACCESS_HPP