/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
#define SHARE_VM_RUNTIME_ACCESS_INLINE_HPP

#include "gc/shared/barrierSetConfig.inline.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/isFloatingPoint.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isVolatile.hpp"
#include "oops/access.hpp"
#include "oops/accessBackend.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.inline.hpp"

// This file outlines the template pipeline of accesses going through the Access
// API. There are essentially 5 steps for each access.
// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
//           and sets default decorators to sensible values.
// * Step 2: Reduce types. This step makes sure there is only a single T type and not
//           multiple types. The P type of the address and T type of the value must
//           match.
// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
//           avoided, and in that case avoids it (calling raw accesses or
//           primitive accesses in a build that does not require primitive GC barriers).
// * Step 4: Runtime-dispatch. This step performs a runtime dispatch to the corresponding
//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
//           to the access.
// * Step 5: Post-runtime dispatch. This step now casts previously unknown types such
//           as the address type of an oop on the heap (is it oop* or narrowOop*) to
//           the appropriate type. It also splits sufficiently orthogonal accesses into
//           different functions, such as whether the access involves oops or primitives
//           and whether the access is performed on the heap or outside. Then the
//           appropriate BarrierSet::AccessBarrier is called to perform the access.
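//
// As a rough illustration (the decorator choice, GC and variable names here are
// hypothetical), a call like
//
//   oop val = HeapAccess<ON_WEAK_OOP_REF>::oop_load_at(holder, offset);
//
// would (1) get sensible default decorators filled in and its types decayed, (2) reduce the
// value type to oop/narrowOop, (3) find that it cannot be hardwired because it needs GC
// barriers, (4) go through the patched function pointer of the runtime dispatch, and
// (5) finally land in the selected BarrierSet::AccessBarrier, e.g. an oop_load_in_heap_at
// overload for the GC in use.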

namespace AccessInternal {

  // Step 5: Post-runtime dispatch.
  // This class is the last step before calling the BarrierSet::AccessBarrier.
  // Here we make sure to figure out types that were not known prior to the
  // runtime dispatch, such as whether an oop on the heap is oop or narrowOop.
  // We also split orthogonal barriers such as handling primitives vs oops
  // and on-heap vs off-heap into different calls to the barrier set.
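  //
  // For example (sketch only; the barrier set name is illustrative), an oop load that was
  // runtime-dispatched for a given GC ends up in something like
  //
  //   PostRuntimeDispatch<SomeBarrierSet::AccessBarrier<decorators>, BARRIER_LOAD, decorators>
  //     ::oop_access_barrier(addr)
  //
  // which then selects oop_load_in_heap or oop_load_not_in_heap based on the IN_HEAP decorator.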
  template <class GCBarrierType, BarrierType type, DecoratorSet decorators>
  struct PostRuntimeDispatch: public AllStatic { };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_STORE, decorators>: public AllStatic {
    template <typename T>
    static void access_barrier(void* addr, T value) {
      GCBarrierType::store_in_heap(reinterpret_cast<T*>(addr), value);
    }

    static void oop_access_barrier(void* addr, oop value) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        GCBarrierType::oop_store_in_heap(reinterpret_cast<OopType*>(addr), value);
      } else {
        GCBarrierType::oop_store_not_in_heap(reinterpret_cast<OopType*>(addr), value);
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_LOAD, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(void* addr) {
      return GCBarrierType::load_in_heap(reinterpret_cast<T*>(addr));
    }

    static oop oop_access_barrier(void* addr) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_load_in_heap(reinterpret_cast<OopType*>(addr));
      } else {
        return GCBarrierType::oop_load_not_in_heap(reinterpret_cast<OopType*>(addr));
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, void* addr) {
      return GCBarrierType::atomic_xchg_in_heap(new_value, reinterpret_cast<T*>(addr));
    }

    static oop oop_access_barrier(oop new_value, void* addr) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_atomic_xchg_in_heap(new_value, reinterpret_cast<OopType*>(addr));
      } else {
        return GCBarrierType::oop_atomic_xchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr));
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, void* addr, T compare_value) {
      return GCBarrierType::atomic_cmpxchg_in_heap(new_value, reinterpret_cast<T*>(addr), compare_value);
    }

    static oop oop_access_barrier(oop new_value, void* addr, oop compare_value) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_atomic_cmpxchg_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
      } else {
        return GCBarrierType::oop_atomic_cmpxchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ARRAYCOPY, decorators>: public AllStatic {
    template <typename T>
    static bool access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      GCBarrierType::arraycopy_in_heap(src_obj, dst_obj, src, dst, length);
      return true;
    }

    template <typename T>
    static bool oop_access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      typedef typename HeapOopType<decorators>::type OopType;
      return GCBarrierType::oop_arraycopy_in_heap(src_obj, dst_obj,
                                                  reinterpret_cast<OopType*>(src),
                                                  reinterpret_cast<OopType*>(dst), length);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_STORE_AT, decorators>: public AllStatic {
    template <typename T>
    static void access_barrier(oop base, ptrdiff_t offset, T value) {
      GCBarrierType::store_in_heap_at(base, offset, value);
    }

    static void oop_access_barrier(oop base, ptrdiff_t offset, oop value) {
      GCBarrierType::oop_store_in_heap_at(base, offset, value);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_LOAD_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(oop base, ptrdiff_t offset) {
      return GCBarrierType::template load_in_heap_at<T>(base, offset);
    }

    static oop oop_access_barrier(oop base, ptrdiff_t offset) {
      return GCBarrierType::oop_load_in_heap_at(base, offset);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, oop base, ptrdiff_t offset) {
      return GCBarrierType::atomic_xchg_in_heap_at(new_value, base, offset);
    }

    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset) {
      return GCBarrierType::oop_atomic_xchg_in_heap_at(new_value, base, offset);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return GCBarrierType::atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
    }

    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
      return GCBarrierType::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_CLONE, decorators>: public AllStatic {
    static void access_barrier(oop src, oop dst, size_t size) {
      GCBarrierType::clone_in_heap(src, dst, size);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_RESOLVE, decorators>: public AllStatic {
    static oop access_barrier(oop obj) {
      return GCBarrierType::resolve(obj);
    }
  };

  // Resolving accessors with barriers from the barrier set happens in two steps.
  // 1. Expand paths with runtime decorators, e.g. whether UseCompressedOops is on or off.
  // 2. Expand paths for each BarrierSet available in the system.
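  //
  // For instance (illustrative only), resolving an oop store barrier first ORs in
  // INTERNAL_RT_USE_COMPRESSED_OOPS when UseCompressedOops is enabled, and then switches
  // over BarrierSet::barrier_set()->kind() to return a function pointer such as
  //
  //   PostRuntimeDispatch<SomeBarrierSet::AccessBarrier<ds>, BARRIER_STORE, ds>::oop_access_barrier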
  template <DecoratorSet decorators, typename FunctionPointerT, BarrierType barrier_type>
  struct BarrierResolver: public AllStatic {
    template <DecoratorSet ds>
    static typename EnableIf<
      HasDecorator<ds, INTERNAL_VALUE_IS_OOP>::value,
      FunctionPointerT>::type
    resolve_barrier_gc() {
      BarrierSet* bs = BarrierSet::barrier_set();
      assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
      switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
        case BarrierSet::bs_name: {                                     \
          return PostRuntimeDispatch<typename BarrierSet::GetType<BarrierSet::bs_name>::type:: \
            AccessBarrier<ds>, barrier_type, ds>::oop_access_barrier; \
        }                                                               \
        break;
        FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
#undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE

      default:
        fatal("BarrierSet AccessBarrier resolving not implemented");
        return NULL;
      };
    }

    template <DecoratorSet ds>
    static typename EnableIf<
      !HasDecorator<ds, INTERNAL_VALUE_IS_OOP>::value,
      FunctionPointerT>::type
    resolve_barrier_gc() {
      BarrierSet* bs = BarrierSet::barrier_set();
      assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
      switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
        case BarrierSet::bs_name: {                                       \
          return PostRuntimeDispatch<typename BarrierSet::GetType<BarrierSet::bs_name>::type:: \
            AccessBarrier<ds>, barrier_type, ds>::access_barrier; \
        }                                                                 \
        break;
        FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
#undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE

      default:
        fatal("BarrierSet AccessBarrier resolving not implemented");
        return NULL;
      };
    }

    static FunctionPointerT resolve_barrier_rt() {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | INTERNAL_RT_USE_COMPRESSED_OOPS;
        return resolve_barrier_gc<expanded_decorators>();
      } else {
        return resolve_barrier_gc<decorators>();
      }
    }

    static FunctionPointerT resolve_barrier() {
      return resolve_barrier_rt();
    }
  };

  // Step 4: Runtime dispatch
  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
  // accessor. This is required when the access either depends on whether compressed oops
  // are being used, or on which GC implementation was chosen (e.g. requires GC
  // barriers). The way it works is that a function pointer initially pointing to an
  // accessor resolution function gets called for each access. Upon first invocation,
  // it resolves which accessor to use for future invocations and patches the
  // function pointer to this new accessor.
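  //
  // In miniature, the pattern looks like this (sketch only; names are illustrative):
  //
  //   static func_t _func;                          // initially points to init()
  //   static T init(args) {                         // slow path, taken on first access
  //     _func = resolve();                          // patch the pointer to the resolved accessor
  //     return _func(args);
  //   }
  //   static T access(args) { return _func(args); } // fast path afterwards
  //
  // Resolution is deterministic for a given VM configuration, so racing initializations
  // all patch in the same accessor.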

  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    static func_t _store_func;

    static void store_init(void* addr, T value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
      _store_func = function;
      function(addr, value);
    }

    static inline void store(void* addr, T value) {
      _store_func(addr, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    static func_t _store_at_func;

    static void store_at_init(oop base, ptrdiff_t offset, T value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
      _store_at_func = function;
      function(base, offset, value);
    }

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      _store_at_func(base, offset, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    static func_t _load_func;

    static T load_init(void* addr) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
      _load_func = function;
      return function(addr);
    }

    static inline T load(void* addr) {
      return _load_func(addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    static func_t _load_at_func;

    static T load_at_init(oop base, ptrdiff_t offset) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
      _load_at_func = function;
      return function(base, offset);
    }

    static inline T load_at(oop base, ptrdiff_t offset) {
      return _load_at_func(base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    static func_t _atomic_cmpxchg_func;

    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
      _atomic_cmpxchg_func = function;
      return function(new_value, addr, compare_value);
    }

    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      return _atomic_cmpxchg_func(new_value, addr, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    static func_t _atomic_cmpxchg_at_func;

    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
      _atomic_cmpxchg_at_func = function;
      return function(new_value, base, offset, compare_value);
    }

    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    static func_t _atomic_xchg_func;

    static T atomic_xchg_init(T new_value, void* addr) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
      _atomic_xchg_func = function;
      return function(new_value, addr);
    }

    static inline T atomic_xchg(T new_value, void* addr) {
      return _atomic_xchg_func(new_value, addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    static func_t _atomic_xchg_at_func;

    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
      _atomic_xchg_at_func = function;
      return function(new_value, base, offset);
    }

    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      return _atomic_xchg_at_func(new_value, base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    static func_t _arraycopy_func;

    static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
      _arraycopy_func = function;
      return function(src_obj, dst_obj, src, dst, length);
    }

    static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      return _arraycopy_func(src_obj, dst_obj, src, dst, length);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    static func_t _clone_func;

    static void clone_init(oop src, oop dst, size_t size) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
      _clone_func = function;
      function(src, dst, size);
    }

    static inline void clone(oop src, oop dst, size_t size) {
      _clone_func(src, dst, size);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
    static func_t _resolve_func;

    static oop resolve_init(oop obj) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
      _resolve_func = function;
      return function(obj);
    }

    static inline oop resolve(oop obj) {
      return _resolve_func(obj);
    }
  };

  // Initialize the function pointers to point to the resolving function.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;

  // Step 3: Pre-runtime dispatching.
  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
  // dispatch point. Otherwise, if hardwiring is not possible, the access goes through
  // the runtime dispatch point.
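  //
  // For example (illustrative), a RawAccess<>::load carries AS_RAW and therefore compiles
  // straight down to Raw::load with no function pointer indirection, whereas a primitive
  // HeapAccess<>::load_at can only skip the runtime dispatch when no barrier set in the
  // build requires primitive GC barriers (see is_hardwired_primitive below).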
  struct PreRuntimeDispatch: AllStatic {
    template<DecoratorSet decorators>
    struct CanHardwireRaw: public IntegralConstant<
      bool,
      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
    {};

    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

    template<DecoratorSet decorators>
    static bool is_hardwired_primitive() {
      return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
             !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        Raw::oop_store(addr, value);
      } else {
        Raw::store(addr, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store(void* addr, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      store<decorators>(field_addr(base, offset), value);
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::template oop_load<T>(addr);
      } else {
        return Raw::template load<T>(addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load(void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      return load<decorators, T>(field_addr(base, offset));
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
      } else {
        return Raw::atomic_cmpxchg(new_value, addr, compare_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(T new_value, void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_xchg(new_value, addr);
      } else {
        return Raw::atomic_xchg(new_value, addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(T new_value, void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg(T new_value, void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      return atomic_xchg<decorators>(new_value, field_addr(base, offset));
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_value, base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, bool>::type
    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_arraycopy(src_obj, dst_obj, src, dst, length);
      } else {
        return Raw::arraycopy(src_obj, dst_obj, src, dst, length);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, bool>::type
    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, bool>::type
    arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length);
      }
    }

    template <DecoratorSet decorators>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      Raw::clone(src, dst, size);
    }

    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
    }

    template <DecoratorSet decorators>
    inline static typename EnableIf<
      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
    resolve(oop obj) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      return Raw::resolve(obj);
    }

    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
    resolve(oop obj) {
      return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
    }
  };

  // This class adds implied decorators that follow according to decorator rules.
  // For example adding default reference strength and default memory ordering
  // semantics.
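  // For example (hypothetical input), DecoratorFixup<IN_HEAP_ARRAY | INTERNAL_VALUE_IS_OOP>::value
  // would add ON_STRONG_OOP_REF (no reference strength given), MO_UNORDERED (no memory
  // ordering given), AS_NORMAL (no barrier strength given) and IN_HEAP (implied by
  // IN_HEAP_ARRAY), plus the build-time decorators.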
  template <DecoratorSet input_decorators>
  struct DecoratorFixup: AllStatic {
    // If no reference strength has been picked, then strong will be picked
    static const DecoratorSet ref_strength_default = input_decorators |
      (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
       ON_STRONG_OOP_REF : INTERNAL_EMPTY);
    // If no memory ordering has been picked, unordered will be picked
    static const DecoratorSet memory_ordering_default = ref_strength_default |
      ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
    // If no barrier strength has been picked, normal will be used
    static const DecoratorSet barrier_strength_default = memory_ordering_default |
      ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
    // Heap array accesses imply it is a heap access
    static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
      ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
    static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
      ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
    static const DecoratorSet archive_root_is_root = conc_root_is_root |
      ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
    static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
  };

  // Step 2: Reduce types.
  // Enforce that for non-oop types, T and P have to be strictly the same.
  // P is the type of the address and T is the type of the values.
  // As for oop types, T may be any of {narrowOop, oop} and P may be any of
  // {narrowOop, oop, HeapWord*}. The following rules apply according to
  // the subsequent table. (columns are P, rows are T)
  // |           | HeapWord* |   oop   | narrowOop |
  // |   oop     |  rt-comp  | hw-none |  hw-comp  |
  // | narrowOop |     x     |    x    |  hw-none  |
  //
  // x means not allowed
  // rt-comp means it must be checked at runtime whether the oop is compressed.
  // hw-none means it is statically known the oop will not be compressed.
  // hw-comp means it is statically known the oop will be compressed.
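  //
  // For example (sketch), the (narrowOop*, oop) overload of store_reduce_types below is the
  // hw-comp case: it statically adds both INTERNAL_CONVERT_COMPRESSED_OOP and
  // INTERNAL_RT_USE_COMPRESSED_OOPS, while the (HeapWord*, oop) overload is the rt-comp case
  // and only adds INTERNAL_CONVERT_COMPRESSED_OOP, leaving the UseCompressedOops check to the
  // later dispatch stages.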

  template <DecoratorSet decorators, typename T>
  inline void store_reduce_types(T* addr, T value) {
    PreRuntimeDispatch::store<decorators>(addr, value);
  }

  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  template <DecoratorSet decorators>
  inline void store_reduce_types(HeapWord* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(oop new_value,
                                         HeapWord* addr,
                                         oop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
    const DecoratorSet expanded_decorators = decorators;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators, typename T>
  inline T load_reduce_types(T* addr) {
    return PreRuntimeDispatch::load<decorators, T>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline oop load_reduce_types(HeapWord* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
    return PreRuntimeDispatch::arraycopy<decorators>(src_obj, dst_obj, src, dst, length);
  }

  template <DecoratorSet decorators>
  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, HeapWord* src, HeapWord* dst, size_t length) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
  }

  template <DecoratorSet decorators>
  inline bool arraycopy_reduce_types(arrayOop src_obj, arrayOop dst_obj, narrowOop* src, narrowOop* dst, size_t length) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
  }

  // Step 1: Set default decorators. This step remembers if a type was volatile
  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
  // memory ordering is set for the access, and the implied decorator rules
  // are applied to select sensible defaults for decorators that have not been
  // explicitly set. For example, default object referent strength is set to strong.
  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
  // and references from the types). Finally, it performs some verification that
  // the passed-in types make sense.
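  //
  // For example (illustrative), loading through a volatile jint* without an explicit MO_*
  // decorator picks up MO_VOLATILE, and the address and value types are decayed so that a
  // const volatile jint* is treated as a plain jint* further down the pipeline.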

  template <DecoratorSet decorators, typename T>
  static void verify_types() {
    // If this fails to compile, then you have sent in something that is
    // not recognized as a valid primitive type to a primitive Access function.
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                   (IsPointer<T>::value || IsIntegral<T>::value) ||
                    IsFloatingPoint<T>::value)); // not allowed primitive type
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline void store(P* addr, T value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT decayed_value = value;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_VOLATILE by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_VOLATILE | decorators) : decorators>::value;
    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
  }

  template <DecoratorSet decorators, typename T>
  inline void store_at(oop base, ptrdiff_t offset, T value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT decayed_value = value;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline T load(P* addr) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_VOLATILE by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_VOLATILE | decorators) : decorators>::value;
    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
  }

  template <DecoratorSet decorators, typename T>
  inline T load_at(oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // Expand the decorators (figure out sensible defaults)
    // Potentially remember if we need compressed oop awareness
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                            const_cast<DecayedP*>(addr),
                                                            compare_decayed_value);
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    // Determine default memory ordering
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    // Potentially remember that we need compressed oop awareness
    const DecoratorSet final_decorators = expanded_decorators |
                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                           INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
                                                                   offset, compare_decayed_value);
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_xchg(T new_value, P* addr) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                         const_cast<DecayedP*>(addr));
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
  }

  template <DecoratorSet decorators, typename T>
  inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ||
                   (IsSame<T, void>::value || IsIntegral<T>::value) ||
                    IsFloatingPoint<T>::value)); // arraycopy allows type erased void elements
    typedef typename Decay<T>::type DecayedT;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP>::value;
    return arraycopy_reduce_types<expanded_decorators>(src_obj, dst_obj,
                                                       const_cast<DecayedT*>(src),
                                                       const_cast<DecayedT*>(dst),
                                                       length);
  }

  template <DecoratorSet decorators>
  inline void clone(oop src, oop dst, size_t size) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
  }

  template <DecoratorSet decorators>
  inline oop resolve(oop obj) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet expected_decorators>
void Access<decorators>::verify_decorators() {
  STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
  const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
  STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
    (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
    (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
    (barrier_strength_decorators ^ AS_RAW) == 0 ||
    (barrier_strength_decorators ^ AS_NORMAL) == 0
  ));
  const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
  STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
    (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
    (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
    (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
    (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
  ));
  const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
  STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
    (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
    (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
    (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
    (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
    (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
    (memory_ordering_decorators ^ MO_SEQ_CST) == 0
  ));
  const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
  STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
    (location_decorators ^ IN_ROOT) == 0 ||
    (location_decorators ^ IN_HEAP) == 0 ||
    (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
    (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
  ));
}

#endif // SHARE_VM_RUNTIME_ACCESS_INLINE_HPP