/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ACCESS_HPP
#define SHARE_VM_RUNTIME_ACCESS_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/decay.hpp"
#include "metaprogramming/integralConstant.hpp"
#include "oops/oopsHierarchy.hpp"
#include "utilities/debug.hpp"
#include "utilities/globalDefinitions.hpp"

// = GENERAL =
// Access is an API for performing accesses with declarative semantics. Each access can have a number of "decorators".
// A decorator is an attribute or property that affects the way a memory access is performed.
// There are different groups of decorators. Some have to do with memory ordering, others with,
// e.g., strength of references, strength of GC barriers, or whether compression should be applied or not.
// Some decorators are set at build time, such as whether primitives require GC barriers or not, others
// at call sites, such as whether an access is performed in the heap or not, and others are resolved
// at runtime, such as GC-specific barriers and encoding/decoding of compressed oops.
// By pipelining the handling of these decorators, the design of the Access API keeps the different
// orthogonal concerns of the decorators separate, while providing a powerful, unified way of
// expressing these semantic properties.

// == OPERATIONS ==
// * load: Load a value from an address.
// * load_at: Load a value from an internal pointer relative to a base object.
// * store: Store a value at an address.
// * store_at: Store a value at an internal pointer relative to a base object.
// * atomic_cmpxchg: Atomically compare-and-swap a new value at an address if the previous value matches the compared value.
// * atomic_cmpxchg_at: Atomically compare-and-swap a new value at an internal pointer address if the previous value matches the compared value.
// * atomic_xchg: Atomically swap in a new value at an address, returning the previous value.
// * atomic_xchg_at: Atomically swap in a new value at an internal pointer address, returning the previous value.
// * arraycopy: Copy data from one heap array to another heap array.
// * clone: Clone the contents of an object to a newly allocated object.
// * resolve: Resolve a stable to-space invariant oop that is guaranteed not to relocate its payload until a subsequent thread transition.
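//
// The *_at operations take a base oop and a byte offset into it, while the other operations
// take a raw address. A hedged sketch of typical call shapes ("obj", "offset", "addr" and the
// values are illustrative names only; HeapAccess and RawAccess are the convenience helper
// classes defined at the end of this file):
//
//   jint v = HeapAccess<>::load_at(obj, offset);                                  // primitive load from a heap field
//   HeapAccess<>::oop_store_at(obj, offset, value);                               // oop store into a heap field
//   jint old = RawAccess<MO_SEQ_CST>::atomic_cmpxchg(new_value, addr, old_value); // raw compare-and-swap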

typedef uint64_t DecoratorSet;

// == Internal Decorators - do not use ==
// * INTERNAL_EMPTY: This is the name for the empty decorator set (in the absence of other decorators).
// * INTERNAL_CONVERT_COMPRESSED_OOP: This is an oop access that will require converting an oop
//   to a narrowOop or vice versa, if UseCompressedOops is known to be set.
// * INTERNAL_VALUE_IS_OOP: Remember that the involved access is performed on an oop rather than on a primitive.
const DecoratorSet INTERNAL_EMPTY                    = UCONST64(0);
const DecoratorSet INTERNAL_CONVERT_COMPRESSED_OOP   = UCONST64(1) << 1;
const DecoratorSet INTERNAL_VALUE_IS_OOP             = UCONST64(1) << 2;

// == Internal build-time Decorators ==
// * INTERNAL_BT_BARRIER_ON_PRIMITIVES: This is set in the barrierSetConfig.hpp file.
// * INTERNAL_BT_TO_SPACE_INVARIANT: This is set in the barrierSetConfig.hpp file iff
//   no GC that is not to-space invariant is bundled in the build.
const DecoratorSet INTERNAL_BT_BARRIER_ON_PRIMITIVES = UCONST64(1) << 3;
const DecoratorSet INTERNAL_BT_TO_SPACE_INVARIANT    = UCONST64(1) << 4;

// == Internal run-time Decorators ==
// * INTERNAL_RT_USE_COMPRESSED_OOPS: This decorator will be set in runtime resolved
//   access backends iff UseCompressedOops is true.
const DecoratorSet INTERNAL_RT_USE_COMPRESSED_OOPS   = UCONST64(1) << 5;

const DecoratorSet INTERNAL_DECORATOR_MASK           = INTERNAL_CONVERT_COMPRESSED_OOP | INTERNAL_VALUE_IS_OOP |
                                                       INTERNAL_BT_BARRIER_ON_PRIMITIVES | INTERNAL_RT_USE_COMPRESSED_OOPS;

// == Memory Ordering Decorators ==
// The memory ordering decorators can be described in the following way:
// === Decorator Rules ===
// The different types of memory ordering guarantees have a strict order of strength.
// Explicitly specifying a stronger ordering implies that the guarantees of the weaker
// properties hold too. The names come from the C++11 atomic operations, and typically
// have a JMM equivalent property.
// The equivalence may be viewed like this:
// MO_UNORDERED is equivalent to JMM plain.
// MO_VOLATILE has no equivalence in JMM, because it's a C++ thing.
// MO_RELAXED is equivalent to JMM opaque.
// MO_ACQUIRE is equivalent to JMM acquire.
// MO_RELEASE is equivalent to JMM release.
// MO_SEQ_CST is equivalent to JMM volatile.
//
// === Stores ===
//  * MO_UNORDERED (Default): No guarantees.
//    - The compiler and hardware are free to reorder aggressively. And they will.
//  * MO_VOLATILE: Volatile stores (in the C++ sense).
//    - The stores are not reordered by the compiler (but possibly by the hardware) w.r.t. other
//      volatile accesses in program order (they may, however, be reordered w.r.t. non-volatile accesses).
//  * MO_RELAXED: Relaxed atomic stores.
//    - The stores are atomic.
//    - Guarantees from volatile stores hold.
//  * MO_RELEASE: Releasing stores.
//    - The releasing store will make its preceding memory accesses observable to memory accesses
//      subsequent to an acquiring load observing this releasing store.
//    - Guarantees from relaxed stores hold.
//  * MO_SEQ_CST: Sequentially consistent stores.
//    - The stores are observed in the same order by MO_SEQ_CST loads on other processors.
//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
//    - Guarantees from releasing stores hold.
// === Loads ===
//  * MO_UNORDERED (Default): No guarantees.
//    - The compiler and hardware are free to reorder aggressively. And they will.
//  * MO_VOLATILE: Volatile loads (in the C++ sense).
//    - The loads are not reordered by the compiler (but possibly by the hardware) w.r.t. other
//      volatile accesses in program order (they may, however, be reordered w.r.t. non-volatile accesses).
//  * MO_RELAXED: Relaxed atomic loads.
//    - The loads are atomic.
//    - Guarantees from volatile loads hold.
//  * MO_ACQUIRE: Acquiring loads.
//    - An acquiring load will make subsequent memory accesses observe the memory accesses
//      preceding the releasing store that the acquiring load observed.
//    - Guarantees from relaxed loads hold.
//  * MO_SEQ_CST: Sequentially consistent loads.
//    - These loads observe MO_SEQ_CST stores in the same order on other processors.
//    - Preceding loads and stores in program order are not reordered with subsequent loads and stores in program order.
//    - Guarantees from acquiring loads hold.
// === Atomic Cmpxchg ===
//  * MO_RELAXED: Atomic but relaxed cmpxchg.
//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold unconditionally.
//  * MO_SEQ_CST: Sequentially consistent cmpxchg.
//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold unconditionally.
// === Atomic Xchg ===
//  * MO_RELAXED: Atomic but relaxed xchg.
//    - Guarantees from MO_RELAXED loads and MO_RELAXED stores hold.
//  * MO_SEQ_CST: Sequentially consistent xchg.
//    - Guarantees from MO_SEQ_CST loads and MO_SEQ_CST stores hold.
const DecoratorSet MO_UNORDERED      = UCONST64(1) << 6;
const DecoratorSet MO_VOLATILE       = UCONST64(1) << 7;
const DecoratorSet MO_RELAXED        = UCONST64(1) << 8;
const DecoratorSet MO_ACQUIRE        = UCONST64(1) << 9;
const DecoratorSet MO_RELEASE        = UCONST64(1) << 10;
const DecoratorSet MO_SEQ_CST        = UCONST64(1) << 11;
const DecoratorSet MO_DECORATOR_MASK = MO_UNORDERED | MO_VOLATILE | MO_RELAXED |
                                       MO_ACQUIRE | MO_RELEASE | MO_SEQ_CST;
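
// For example, a sequentially consistent primitive store to a heap field, and a relaxed
// load of the same field, could be written as follows (a hedged sketch; "obj", "offset"
// and the jint type are illustrative only, and HeapAccess is the convenience helper class
// defined at the end of this file):
//
//   HeapAccess<MO_SEQ_CST>::store_at(obj, offset, (jint)42);
//   jint v = HeapAccess<MO_RELAXED>::load_at(obj, offset);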

// === Barrier Strength Decorators ===
// * AS_RAW: The access will translate into a raw memory access, hence ignoring all semantic concerns
//   except memory ordering and compressed oops. This will bypass runtime function pointer dispatching
//   in the pipeline and hardwire to raw accesses without going through the GC access barriers.
//  - Accesses on oop* translate to raw memory accesses without runtime checks
//  - Accesses on narrowOop* translate to encoded/decoded memory accesses without runtime checks
//  - Accesses on HeapWord* translate to a runtime check choosing one of the above
//  - Accesses on other types translate to raw memory accesses without runtime checks
// * AS_DEST_NOT_INITIALIZED: This property can be important to e.g. SATB barriers by
//   marking that the previous value is uninitialized nonsense rather than a real value.
// * AS_NO_KEEPALIVE: The barrier is used only on oop references and will not keep any involved objects
//   alive, regardless of the type of reference being accessed. It will, however, perform the memory access
//   in a consistent way w.r.t. e.g. concurrent compaction, so that the right field is being accessed,
//   and maintain, e.g., intergenerational or interregional pointers if applicable. This should be used with
//   extreme caution in isolated scopes.
// * AS_NORMAL: The access will be resolved to an accessor on the BarrierSet class, leaving it to
//   the GC to perform the access and apply whatever barriers are required. This is the default.
//   Note that primitive accesses will only be resolved on the barrier set if the appropriate build-time
//   decorator for enabling primitive barriers is enabled for the build.
const DecoratorSet AS_RAW                  = UCONST64(1) << 12;
const DecoratorSet AS_DEST_NOT_INITIALIZED = UCONST64(1) << 13;
const DecoratorSet AS_NO_KEEPALIVE         = UCONST64(1) << 14;
const DecoratorSet AS_NORMAL               = UCONST64(1) << 15;
const DecoratorSet AS_DECORATOR_MASK       = AS_RAW | AS_DEST_NOT_INITIALIZED |
                                             AS_NO_KEEPALIVE | AS_NORMAL;
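
// For example, a store that bypasses the GC barriers entirely, honoring only memory ordering
// and compressed oops, and a normal GC-aware heap store, could look like this (a hedged
// sketch; "addr", "obj", "offset" and "value" are illustrative names, and RawAccess/HeapAccess
// are the convenience helper classes defined at the end of this file):
//
//   RawAccess<>::store(addr, value);            // AS_RAW: no GC barriers
//   HeapAccess<>::store_at(obj, offset, value); // AS_NORMAL (default): barriers chosen by the GC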

// === Reference Strength Decorators ===
// These decorators only apply to accesses on oop-like types (oop/narrowOop).
// * ON_STRONG_OOP_REF: Memory access is performed on a strongly reachable reference.
// * ON_WEAK_OOP_REF: The memory access is performed on a weakly reachable reference.
// * ON_PHANTOM_OOP_REF: The memory access is performed on a phantomly reachable reference.
//   This is the same ring of strength as jweak and weak oops in the VM.
// * ON_UNKNOWN_OOP_REF: The memory access is performed on a reference of unknown strength.
//   This could for example come from the unsafe API.
// * Default (no explicit reference strength specified): ON_STRONG_OOP_REF
const DecoratorSet ON_STRONG_OOP_REF  = UCONST64(1) << 16;
const DecoratorSet ON_WEAK_OOP_REF    = UCONST64(1) << 17;
const DecoratorSet ON_PHANTOM_OOP_REF = UCONST64(1) << 18;
const DecoratorSet ON_UNKNOWN_OOP_REF = UCONST64(1) << 19;
const DecoratorSet ON_DECORATOR_MASK  = ON_STRONG_OOP_REF | ON_WEAK_OOP_REF |
                                        ON_PHANTOM_OOP_REF | ON_UNKNOWN_OOP_REF;
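
// For example, loading the referent of a weak reference without keeping it alive could be
// expressed as follows (a hedged sketch; "obj" and "referent_offset" are illustrative names):
//
//   oop referent = HeapAccess<ON_WEAK_OOP_REF | AS_NO_KEEPALIVE>::oop_load_at(obj, referent_offset);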

// === Access Location ===
// Accesses can take place in, e.g., the heap, the old or young generation, or different native roots.
// The location is important to the GC as it may imply different actions. The following decorators are used:
// * IN_HEAP: The access is performed in the heap. Many barriers such as card marking will
//   be omitted if this decorator is not set.
// * IN_HEAP_ARRAY: The access is performed on a heap allocated array. This is sometimes a special case
//   for some GCs, and implies that the access is also an IN_HEAP access.
// * IN_ROOT: The access is performed in an off-heap data structure pointing into the Java heap.
// * IN_CONCURRENT_ROOT: The access is performed in an off-heap data structure pointing into the Java heap,
//   but is notably not scanned during safepoints. This is sometimes a special case for some GCs and
//   implies that the access is also an IN_ROOT access.
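// * IN_ARCHIVE_ROOT: The access is performed in a root pointing into the CDS archive heap region.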
const DecoratorSet IN_HEAP            = UCONST64(1) << 20;
const DecoratorSet IN_HEAP_ARRAY      = UCONST64(1) << 21;
const DecoratorSet IN_ROOT            = UCONST64(1) << 22;
const DecoratorSet IN_CONCURRENT_ROOT = UCONST64(1) << 23;
const DecoratorSet IN_ARCHIVE_ROOT    = UCONST64(1) << 24;
const DecoratorSet IN_DECORATOR_MASK  = IN_HEAP | IN_HEAP_ARRAY |
                                        IN_ROOT | IN_CONCURRENT_ROOT |
                                        IN_ARCHIVE_ROOT;
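
// For example, storing an oop into an off-heap root (a native data structure pointing into
// the Java heap) could be written as follows (a hedged sketch; "root_addr" and "value" are
// illustrative names, and RootAccess is the convenience helper class defined at the end of
// this file, equivalent to Access<IN_ROOT>):
//
//   RootAccess<>::oop_store(root_addr, value);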

// == Value Decorators ==
// * OOP_NOT_NULL: The passed-in oop value is guaranteed to be non-null. This property can make
//   certain barriers faster, such as compressing oops.
const DecoratorSet OOP_NOT_NULL       = UCONST64(1) << 25;
const DecoratorSet OOP_DECORATOR_MASK = OOP_NOT_NULL;

// == Arraycopy Decorators ==
// * ARRAYCOPY_CHECKCAST: This property means that the classes of the objects in the source array
//   are not guaranteed to be subclasses of the destination array's element class. This requires
//   a check-cast barrier during the copying operation. If this is not set, it is assumed
//   that the arrays are covariant (the source array type is-a destination array type).
// * ARRAYCOPY_DISJOINT: This property means that it is known that the two array ranges
//   are disjoint.
// * ARRAYCOPY_ARRAYOF: The copy is in the arrayof form.
// * ARRAYCOPY_ATOMIC: The accesses have to be atomic at the granularity of the array elements.
// * ARRAYCOPY_ALIGNED: The accesses have to be aligned on a HeapWord.
const DecoratorSet ARRAYCOPY_CHECKCAST            = UCONST64(1) << 26;
const DecoratorSet ARRAYCOPY_DISJOINT             = UCONST64(1) << 27;
const DecoratorSet ARRAYCOPY_ARRAYOF              = UCONST64(1) << 28;
const DecoratorSet ARRAYCOPY_ATOMIC               = UCONST64(1) << 29;
const DecoratorSet ARRAYCOPY_ALIGNED              = UCONST64(1) << 30;
const DecoratorSet ARRAYCOPY_DECORATOR_MASK       = ARRAYCOPY_CHECKCAST | ARRAYCOPY_DISJOINT |
                                                    ARRAYCOPY_ARRAYOF | ARRAYCOPY_ATOMIC |
                                                    ARRAYCOPY_ALIGNED;
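
// For example, copying between two disjoint ranges of a primitive heap array with
// element-atomic accesses could be expressed as follows (a hedged sketch; the argument
// names are illustrative only):
//
//   HeapAccess<ARRAYCOPY_DISJOINT | ARRAYCOPY_ATOMIC>::arraycopy(src_obj, dst_obj, src, dst, length);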

// The HasDecorator trait helps determine at compile time whether a decorator set
// has a non-empty intersection with another decorator set.
template <DecoratorSet decorators, DecoratorSet decorator>
struct HasDecorator: public IntegralConstant<bool, (decorators & decorator) != 0> {};
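
// For example, a backend could use HasDecorator to choose a code path at compile time
// (a hedged sketch; the function name "access_barrier" is illustrative only):
//
//   template <DecoratorSet decorators>
//   void access_barrier() {
//     if (HasDecorator<decorators, MO_SEQ_CST>::value) {
//       // handle the sequentially consistent case
//     }
//   }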

namespace AccessInternal {
  template <typename T>
  struct OopOrNarrowOopInternal: AllStatic {
    typedef oop type;
  };

  template <>
  struct OopOrNarrowOopInternal<narrowOop>: AllStatic {
    typedef narrowOop type;
  };

  // This metafunction returns a canonicalized oop/narrowOop type for oop-like
  // types passed in from the oop_* overloads, where the user has sworn that the
  // passed-in values are oop-like (e.g. oop, oopDesc*, arrayOop,
  // narrowOop, instanceOopDesc*, and various other things).
  // In the oop_* overloads, it must hold that if the passed-in type T is not
  // narrowOop, then it by contract has to be one of many oop-like types implicitly
  // convertible to oop, and hence oop is returned as the canonical oop type.
  // If it turns out it was not, then the implicit conversion to oop will fail
  // to compile, as desired.
  template <typename T>
  struct OopOrNarrowOop: AllStatic {
    typedef typename OopOrNarrowOopInternal<typename Decay<T>::type>::type type;
  };
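
  // For example (illustrative only):
  //   OopOrNarrowOop<instanceOopDesc*>::type is oop
  //   OopOrNarrowOop<narrowOop>::type        is narrowOop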

  inline void* field_addr(oop base, ptrdiff_t byte_offset) {
    return reinterpret_cast<void*>(reinterpret_cast<intptr_t>((void*)base) + byte_offset);
  }

  template <DecoratorSet decorators, typename T>
  void store_at(oop base, ptrdiff_t offset, T value);

  template <DecoratorSet decorators, typename T>
  T load_at(oop base, ptrdiff_t offset);

  template <DecoratorSet decorators, typename T>
  T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value);

  template <DecoratorSet decorators, typename T>
  T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset);

  template <DecoratorSet decorators, typename P, typename T>
  void store(P* addr, T value);

  template <DecoratorSet decorators, typename P, typename T>
  T load(P* addr);

  template <DecoratorSet decorators, typename P, typename T>
  T atomic_cmpxchg(T new_value, P* addr, T compare_value);

  template <DecoratorSet decorators, typename P, typename T>
  T atomic_xchg(T new_value, P* addr);

  template <DecoratorSet decorators, typename T>
  bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length);

  template <DecoratorSet decorators>
  void clone(oop src, oop dst, size_t size);

  template <DecoratorSet decorators>
  oop resolve(oop src);

  // Infer the type that should be returned from a load.
  template <typename P, DecoratorSet decorators>
  class OopLoadProxy: public StackObj {
  private:
    P *const _addr;
  public:
    OopLoadProxy(P* addr) : _addr(addr) {}

    inline operator oop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, oop>(_addr);
    }

    inline operator narrowOop() {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, narrowOop>(_addr);
    }

    template <typename T>
    inline bool operator ==(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load<decorators | INTERNAL_VALUE_IS_OOP, P, T>(_addr) != other;
    }
  };

  // Infer the type that should be returned from a load_at.
  template <DecoratorSet decorators>
  class LoadAtProxy: public StackObj {
  private:
    const oop _base;
    const ptrdiff_t _offset;
  public:
    LoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    template <typename T>
    inline operator T() const {
      return load_at<decorators, T>(_base, _offset);
    }

    template <typename T>
    inline bool operator ==(const T& other) const { return load_at<decorators, T>(_base, _offset) == other; }

    template <typename T>
    inline bool operator !=(const T& other) const { return load_at<decorators, T>(_base, _offset) != other; }
  };

  template <DecoratorSet decorators>
  class OopLoadAtProxy: public StackObj {
  private:
    const oop _base;
    const ptrdiff_t _offset;
  public:
    OopLoadAtProxy(oop base, ptrdiff_t offset) : _base(base), _offset(offset) {}

    inline operator oop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, oop>(_base, _offset);
    }

    inline operator narrowOop() const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, narrowOop>(_base, _offset);
    }

    template <typename T>
    inline bool operator ==(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) == other;
    }

    template <typename T>
    inline bool operator !=(const T& other) const {
      return load_at<decorators | INTERNAL_VALUE_IS_OOP, T>(_base, _offset) != other;
    }
  };
}

template <DecoratorSet decorators = INTERNAL_EMPTY>
class Access: public AllStatic {
  // This function asserts that if an access is passed a decorator outside of the
  // expected_decorators, then something is wrong. It additionally checks
  // the consistency of the decorators, so that supposedly disjoint decorators are indeed
  // disjoint. For example, an access cannot be both in the heap and in a root at the
  // same time.
  template <DecoratorSet expected_decorators>
  static void verify_decorators();

  template <DecoratorSet expected_mo_decorators>
  static void verify_primitive_decorators() {
    const DecoratorSet primitive_decorators = (AS_DECORATOR_MASK ^ AS_NO_KEEPALIVE ^ AS_DEST_NOT_INITIALIZED) |
                                              IN_HEAP | IN_HEAP_ARRAY;
    verify_decorators<expected_mo_decorators | primitive_decorators>();
  }

  template <DecoratorSet expected_mo_decorators>
  static void verify_oop_decorators() {
    const DecoratorSet oop_decorators = AS_DECORATOR_MASK | IN_DECORATOR_MASK |
                                        (ON_DECORATOR_MASK ^ ON_UNKNOWN_OOP_REF) | // no unknown oop refs outside of the heap
                                        OOP_DECORATOR_MASK;
    verify_decorators<expected_mo_decorators | oop_decorators>();
  }

  template <DecoratorSet expected_mo_decorators>
  static void verify_heap_oop_decorators() {
    const DecoratorSet heap_oop_decorators = AS_DECORATOR_MASK | ON_DECORATOR_MASK |
                                             OOP_DECORATOR_MASK | (IN_DECORATOR_MASK ^
                                                                   (IN_ROOT | IN_CONCURRENT_ROOT)); // no root accesses in the heap
    verify_decorators<expected_mo_decorators | heap_oop_decorators>();
  }

  static const DecoratorSet load_mo_decorators = MO_UNORDERED | MO_VOLATILE | MO_RELAXED | MO_ACQUIRE | MO_SEQ_CST;
  static const DecoratorSet store_mo_decorators = MO_UNORDERED | MO_VOLATILE | MO_RELAXED | MO_RELEASE | MO_SEQ_CST;
  static const DecoratorSet atomic_xchg_mo_decorators = MO_SEQ_CST;
  static const DecoratorSet atomic_cmpxchg_mo_decorators = MO_RELAXED | MO_SEQ_CST;

public:
  // Primitive heap accesses
  static inline AccessInternal::LoadAtProxy<decorators> load_at(oop base, ptrdiff_t offset) {
    verify_primitive_decorators<load_mo_decorators>();
    return AccessInternal::LoadAtProxy<decorators>(base, offset);
  }

  template <typename T>
  static inline void store_at(oop base, ptrdiff_t offset, T value) {
    verify_primitive_decorators<store_mo_decorators>();
    AccessInternal::store_at<decorators>(base, offset, value);
  }

  template <typename T>
  static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    verify_primitive_decorators<atomic_cmpxchg_mo_decorators>();
    return AccessInternal::atomic_cmpxchg_at<decorators>(new_value, base, offset, compare_value);
  }

  template <typename T>
  static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    verify_primitive_decorators<atomic_xchg_mo_decorators>();
    return AccessInternal::atomic_xchg_at<decorators>(new_value, base, offset);
  }

  template <typename T>
  static inline void arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
    verify_decorators<ARRAYCOPY_DECORATOR_MASK | IN_HEAP |
                      AS_DECORATOR_MASK>();
    AccessInternal::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
  }

  // Oop heap accesses
  static inline AccessInternal::OopLoadAtProxy<decorators> oop_load_at(oop base, ptrdiff_t offset) {
    verify_heap_oop_decorators<load_mo_decorators>();
    return AccessInternal::OopLoadAtProxy<decorators>(base, offset);
  }

  template <typename T>
  static inline void oop_store_at(oop base, ptrdiff_t offset, T value) {
    verify_heap_oop_decorators<store_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType oop_value = value;
    AccessInternal::store_at<decorators | INTERNAL_VALUE_IS_OOP>(base, offset, oop_value);
  }

  template <typename T>
  static inline T oop_atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    verify_heap_oop_decorators<atomic_cmpxchg_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType new_oop_value = new_value;
    OopType compare_oop_value = compare_value;
    return AccessInternal::atomic_cmpxchg_at<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, base, offset, compare_oop_value);
  }

  template <typename T>
  static inline T oop_atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    verify_heap_oop_decorators<atomic_xchg_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType new_oop_value = new_value;
    return AccessInternal::atomic_xchg_at<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, base, offset);
  }

  template <typename T>
  static inline bool oop_arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
    verify_decorators<ARRAYCOPY_DECORATOR_MASK | IN_HEAP | AS_DECORATOR_MASK>();
    return AccessInternal::arraycopy<decorators | INTERNAL_VALUE_IS_OOP>(src_obj, dst_obj, src, dst, length);
  }

  // Clone an object from src to dst
  static inline void clone(oop src, oop dst, size_t size) {
    verify_decorators<IN_HEAP>();
    AccessInternal::clone<decorators>(src, dst, size);
  }

  // Primitive accesses
  template <typename P>
  static inline P load(P* addr) {
    verify_primitive_decorators<load_mo_decorators>();
    return AccessInternal::load<decorators, P, P>(addr);
  }

  template <typename P, typename T>
  static inline void store(P* addr, T value) {
    verify_primitive_decorators<store_mo_decorators>();
    AccessInternal::store<decorators>(addr, value);
  }

  template <typename P, typename T>
  static inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
    verify_primitive_decorators<atomic_cmpxchg_mo_decorators>();
    return AccessInternal::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
  }

  template <typename P, typename T>
  static inline T atomic_xchg(T new_value, P* addr) {
    verify_primitive_decorators<atomic_xchg_mo_decorators>();
    return AccessInternal::atomic_xchg<decorators>(new_value, addr);
  }

  // Oop accesses
  template <typename P>
  static inline AccessInternal::OopLoadProxy<P, decorators> oop_load(P* addr) {
    verify_oop_decorators<load_mo_decorators>();
    return AccessInternal::OopLoadProxy<P, decorators>(addr);
  }

  template <typename P, typename T>
  static inline void oop_store(P* addr, T value) {
    verify_oop_decorators<store_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType oop_value = value;
    AccessInternal::store<decorators | INTERNAL_VALUE_IS_OOP>(addr, oop_value);
  }

  template <typename P, typename T>
  static inline T oop_atomic_cmpxchg(T new_value, P* addr, T compare_value) {
    verify_oop_decorators<atomic_cmpxchg_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType new_oop_value = new_value;
    OopType compare_oop_value = compare_value;
    return AccessInternal::atomic_cmpxchg<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, addr, compare_oop_value);
  }

  template <typename P, typename T>
  static inline T oop_atomic_xchg(T new_value, P* addr) {
    verify_oop_decorators<atomic_xchg_mo_decorators>();
    typedef typename AccessInternal::OopOrNarrowOop<T>::type OopType;
    OopType new_oop_value = new_value;
    return AccessInternal::atomic_xchg<decorators | INTERNAL_VALUE_IS_OOP>(new_oop_value, addr);
  }

  static oop resolve(oop obj) {
    verify_decorators<INTERNAL_EMPTY>();
    return AccessInternal::resolve<decorators>(obj);
  }
};

// Helper for performing raw accesses (knows only of memory ordering and
// atomicity decorators as well as compressed oops)
template <DecoratorSet decorators = INTERNAL_EMPTY>
class RawAccess: public Access<AS_RAW | decorators> {};

// Helper for performing normal accesses on the heap. These accesses
// may resolve an accessor on a GC barrier set
template <DecoratorSet decorators = INTERNAL_EMPTY>
class HeapAccess: public Access<IN_HEAP | decorators> {};

// Helper for performing normal accesses in roots. These accesses
// may resolve an accessor on a GC barrier set
template <DecoratorSet decorators = INTERNAL_EMPTY>
class RootAccess: public Access<IN_ROOT | decorators> {};
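
// A few hedged end-to-end examples using the helpers above (all names are illustrative only):
//
//   jbyte b = RawAccess<>::load(byte_addr);                        // raw primitive load
//   HeapAccess<MO_RELAXED>::store_at(obj, offset, (jint)17);       // relaxed primitive heap store
//   oop o = RootAccess<ON_PHANTOM_OOP_REF>::oop_load(handle_addr); // phantom-strength root load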

#endif // SHARE_VM_RUNTIME_ACCESS_HPP