/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ACCESS_INLINE_HPP
#define SHARE_VM_RUNTIME_ACCESS_INLINE_HPP

#include "gc/shared/barrierSetConfig.inline.hpp"
#include "metaprogramming/conditional.hpp"
#include "metaprogramming/isFloatingPoint.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "metaprogramming/isVolatile.hpp"
#include "oops/access.hpp"
#include "oops/accessBackend.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/orderAccess.inline.hpp"

// This file outlines the template pipeline of accesses going through the Access
// API. There are essentially 5 steps for each access.
// * Step 1: Set default decorators and decay types. This step gets rid of CV qualifiers
//           and sets default decorators to sensible values.
// * Step 2: Reduce types. This step makes sure there is only a single T type and not
//           multiple types. The P type of the address and T type of the value must
//           match.
// * Step 3: Pre-runtime dispatch. This step checks whether a runtime call can be
//           avoided, and in that case avoids it (calling raw accesses or
//           primitive accesses in a build that does not require primitive GC barriers).
// * Step 4: Runtime dispatch. This step performs a runtime dispatch to the corresponding
//           BarrierSet::AccessBarrier accessor that attaches GC-required barriers
//           to the access.
// * Step 5: Post-runtime dispatch. This step now casts previously unknown types such
//           as the address type of an oop on the heap (is it oop* or narrowOop*) to
//           the appropriate type. It also splits sufficiently orthogonal accesses into
//           different functions, such as whether the access involves oops or primitives
//           and whether the access is performed on the heap or outside. Then the
//           appropriate BarrierSet::AccessBarrier is called to perform the access.
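//
// As a rough illustration only (not an exhaustive trace): a call such as
// HeapAccess<ON_STRONG_OOP_REF>::oop_load_at(obj, offset) would decay and default its
// decorators (Step 1), reduce the value type to oop/narrowOop (Step 2), find that it
// cannot be hardwired raw because GC barriers are needed (Step 3), resolve a function
// pointer for the current BarrierSet and the UseCompressedOops setting (Step 4), and
// finally call the selected BarrierSet::AccessBarrier::oop_load_in_heap_at on the
// properly typed address (Step 5).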

namespace AccessInternal {

  // Step 5: Post-runtime dispatch.
  // This class is the last step before calling the BarrierSet::AccessBarrier.
  // Here we make sure to figure out types that were not known prior to the
  // runtime dispatch, such as whether an oop on the heap is an oop or a narrowOop.
  // We also split orthogonal barriers, such as handling primitives vs. oops
  // and on-heap vs. off-heap accesses, into different calls into the barrier set.
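  //
  // The GCBarrierType plugged in here is expected to provide static entry points of
  // roughly the following shape (illustrative sketch only; the exact signatures are
  // whatever the concrete BarrierSet::AccessBarrier declares):
  //
  //   static void oop_store_in_heap(T* addr, oop value);
  //   static void oop_store_not_in_heap(T* addr, oop value);
  //   static oop  oop_load_in_heap_at(oop base, ptrdiff_t offset);
  //   // ... plus the primitive, atomic, arraycopy, clone and resolve variants
  //   // used by the specializations below.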
  template <class GCBarrierType, BarrierType type, DecoratorSet decorators>
  struct PostRuntimeDispatch: public AllStatic { };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_STORE, decorators>: public AllStatic {
    template <typename T>
    static void access_barrier(void* addr, T value) {
      GCBarrierType::store_in_heap(reinterpret_cast<T*>(addr), value);
    }

    static void oop_access_barrier(void* addr, oop value) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        GCBarrierType::oop_store_in_heap(reinterpret_cast<OopType*>(addr), value);
      } else {
        GCBarrierType::oop_store_not_in_heap(reinterpret_cast<OopType*>(addr), value);
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_LOAD, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(void* addr) {
      return GCBarrierType::load_in_heap(reinterpret_cast<T*>(addr));
    }

    static oop oop_access_barrier(void* addr) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_load_in_heap(reinterpret_cast<OopType*>(addr));
      } else {
        return GCBarrierType::oop_load_not_in_heap(reinterpret_cast<OopType*>(addr));
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, void* addr) {
      return GCBarrierType::atomic_xchg_in_heap(new_value, reinterpret_cast<T*>(addr));
    }

    static oop oop_access_barrier(oop new_value, void* addr) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_atomic_xchg_in_heap(new_value, reinterpret_cast<OopType*>(addr));
      } else {
        return GCBarrierType::oop_atomic_xchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr));
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, void* addr, T compare_value) {
      return GCBarrierType::atomic_cmpxchg_in_heap(new_value, reinterpret_cast<T*>(addr), compare_value);
    }

    static oop oop_access_barrier(oop new_value, void* addr, oop compare_value) {
      typedef typename HeapOopType<decorators>::type OopType;
      if (HasDecorator<decorators, IN_HEAP>::value) {
        return GCBarrierType::oop_atomic_cmpxchg_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
      } else {
        return GCBarrierType::oop_atomic_cmpxchg_not_in_heap(new_value, reinterpret_cast<OopType*>(addr), compare_value);
      }
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ARRAYCOPY, decorators>: public AllStatic {
    template <typename T>
    static bool access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      return GCBarrierType::arraycopy_in_heap(src_obj, dst_obj, src, dst, length);
    }

    template <typename T>
    static bool oop_access_barrier(arrayOop src_obj, arrayOop dst_obj, T* src, T* dst, size_t length) {
      typedef typename HeapOopType<decorators>::type OopType;
      return GCBarrierType::oop_arraycopy_in_heap(src_obj, dst_obj,
                                                  reinterpret_cast<OopType*>(src),
                                                  reinterpret_cast<OopType*>(dst), length);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_STORE_AT, decorators>: public AllStatic {
    template <typename T>
    static void access_barrier(oop base, ptrdiff_t offset, T value) {
      GCBarrierType::store_in_heap_at(base, offset, value);
    }

    static void oop_access_barrier(oop base, ptrdiff_t offset, oop value) {
      GCBarrierType::oop_store_in_heap_at(base, offset, value);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_LOAD_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(oop base, ptrdiff_t offset) {
      return GCBarrierType::template load_in_heap_at<T>(base, offset);
    }

    static oop oop_access_barrier(oop base, ptrdiff_t offset) {
      return GCBarrierType::oop_load_in_heap_at(base, offset);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_XCHG_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, oop base, ptrdiff_t offset) {
      return GCBarrierType::atomic_xchg_in_heap_at(new_value, base, offset);
    }

    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset) {
      return GCBarrierType::oop_atomic_xchg_in_heap_at(new_value, base, offset);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_ATOMIC_CMPXCHG_AT, decorators>: public AllStatic {
    template <typename T>
    static T access_barrier(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return GCBarrierType::atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
    }

    static oop oop_access_barrier(oop new_value, oop base, ptrdiff_t offset, oop compare_value) {
      return GCBarrierType::oop_atomic_cmpxchg_in_heap_at(new_value, base, offset, compare_value);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_CLONE, decorators>: public AllStatic {
    static void access_barrier(oop src, oop dst, size_t size) {
      GCBarrierType::clone_in_heap(src, dst, size);
    }
  };

  template <class GCBarrierType, DecoratorSet decorators>
  struct PostRuntimeDispatch<GCBarrierType, BARRIER_RESOLVE, decorators>: public AllStatic {
    static oop access_barrier(oop obj) {
      return GCBarrierType::resolve(obj);
    }
  };

  // Resolving accessors with barriers from the barrier set happens in two steps.
  // 1. Expand paths with runtime decorators, e.g. whether UseCompressedOops is on or off.
  // 2. Expand paths for each BarrierSet available in the system.
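  //
  // The result is a plain function pointer. As a sketch (assuming, purely for
  // illustration, a build whose only concrete barrier set is "MyGCBarrierSet"),
  // resolving an oop load would boil down to something like:
  //
  //   // pseudo-expansion of resolve_barrier_gc() for BARRIER_LOAD:
  //   return PostRuntimeDispatch<MyGCBarrierSet::AccessBarrier<ds>,
  //                              BARRIER_LOAD, ds>::oop_access_barrier;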
  template <DecoratorSet decorators, typename FunctionPointerT, BarrierType barrier_type>
  struct BarrierResolver: public AllStatic {
    template <DecoratorSet ds>
    static typename EnableIf<
      HasDecorator<ds, INTERNAL_VALUE_IS_OOP>::value,
      FunctionPointerT>::type
    resolve_barrier_gc() {
      BarrierSet* bs = BarrierSet::barrier_set();
      assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
      switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
        case BarrierSet::bs_name: {                                     \
          return PostRuntimeDispatch<typename BarrierSet::GetType<BarrierSet::bs_name>::type:: \
            AccessBarrier<ds>, barrier_type, ds>::oop_access_barrier; \
        }                                                               \
        break;
        FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
#undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE

      default:
        fatal("BarrierSet AccessBarrier resolving not implemented");
        return NULL;
      };
    }

    template <DecoratorSet ds>
    static typename EnableIf<
      !HasDecorator<ds, INTERNAL_VALUE_IS_OOP>::value,
      FunctionPointerT>::type
    resolve_barrier_gc() {
      BarrierSet* bs = BarrierSet::barrier_set();
      assert(bs != NULL, "GC barriers invoked before BarrierSet is set");
      switch (bs->kind()) {
#define BARRIER_SET_RESOLVE_BARRIER_CLOSURE(bs_name)                    \
        case BarrierSet::bs_name: {                                       \
          return PostRuntimeDispatch<typename BarrierSet::GetType<BarrierSet::bs_name>::type:: \
            AccessBarrier<ds>, barrier_type, ds>::access_barrier; \
        }                                                                 \
        break;
        FOR_EACH_CONCRETE_BARRIER_SET_DO(BARRIER_SET_RESOLVE_BARRIER_CLOSURE)
#undef BARRIER_SET_RESOLVE_BARRIER_CLOSURE

      default:
        fatal("BarrierSet AccessBarrier resolving not implemented");
        return NULL;
      };
    }

    static FunctionPointerT resolve_barrier_rt() {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | INTERNAL_RT_USE_COMPRESSED_OOPS;
        return resolve_barrier_gc<expanded_decorators>();
      } else {
        return resolve_barrier_gc<decorators>();
      }
    }

    static FunctionPointerT resolve_barrier() {
      return resolve_barrier_rt();
    }
  };

  // Step 4: Runtime dispatch
  // The RuntimeDispatch class is responsible for performing a runtime dispatch of the
  // accessor. This is required when the access either depends on whether compressed oops
  // are in use, or on which GC implementation was chosen (e.g. one that requires GC
  // barriers). The way it works is that a function pointer initially points to an
  // accessor resolution function, which is what gets called for the first access. Upon
  // that first invocation, it resolves which accessor to use for future invocations and
  // patches the function pointer to that new accessor.
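  //
  // In isolation, the pattern looks roughly like this (illustrative sketch only;
  // the real resolution goes through BarrierResolver above):
  //
  //   static func_t _store_func = &store_init;        // starts out pointing at the resolver
  //
  //   static void store_init(void* addr, T value) {
  //     func_t resolved = resolve_barrier();          // pick the right accessor once
  //     _store_func = resolved;                       // patch the dispatch pointer
  //     resolved(addr, value);                        // and perform this first access
  //   }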

  template <DecoratorSet decorators, typename T, BarrierType type>
  struct RuntimeDispatch: AllStatic {};

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE>::type func_t;
    static func_t _store_func;

    static void store_init(void* addr, T value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE>::resolve_barrier();
      _store_func = function;
      function(addr, value);
    }

    static inline void store(void* addr, T value) {
      _store_func(addr, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_STORE_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type func_t;
    static func_t _store_at_func;

    static void store_at_init(oop base, ptrdiff_t offset, T value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_STORE_AT>::resolve_barrier();
      _store_at_func = function;
      function(base, offset, value);
    }

    static inline void store_at(oop base, ptrdiff_t offset, T value) {
      _store_at_func(base, offset, value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD>::type func_t;
    static func_t _load_func;

    static T load_init(void* addr) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD>::resolve_barrier();
      _load_func = function;
      return function(addr);
    }

    static inline T load(void* addr) {
      return _load_func(addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type func_t;
    static func_t _load_at_func;

    static T load_at_init(oop base, ptrdiff_t offset) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_LOAD_AT>::resolve_barrier();
      _load_at_func = function;
      return function(base, offset);
    }

    static inline T load_at(oop base, ptrdiff_t offset) {
      return _load_at_func(base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type func_t;
    static func_t _atomic_cmpxchg_func;

    static T atomic_cmpxchg_init(T new_value, void* addr, T compare_value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG>::resolve_barrier();
      _atomic_cmpxchg_func = function;
      return function(new_value, addr, compare_value);
    }

    static inline T atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      return _atomic_cmpxchg_func(new_value, addr, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type func_t;
    static func_t _atomic_cmpxchg_at_func;

    static T atomic_cmpxchg_at_init(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_CMPXCHG_AT>::resolve_barrier();
      _atomic_cmpxchg_at_func = function;
      return function(new_value, base, offset, compare_value);
    }

    static inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return _atomic_cmpxchg_at_func(new_value, base, offset, compare_value);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type func_t;
    static func_t _atomic_xchg_func;

    static T atomic_xchg_init(T new_value, void* addr) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG>::resolve_barrier();
      _atomic_xchg_func = function;
      return function(new_value, addr);
    }

    static inline T atomic_xchg(T new_value, void* addr) {
      return _atomic_xchg_func(new_value, addr);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type func_t;
    static func_t _atomic_xchg_at_func;

    static T atomic_xchg_at_init(T new_value, oop base, ptrdiff_t offset) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ATOMIC_XCHG_AT>::resolve_barrier();
      _atomic_xchg_at_func = function;
      return function(new_value, base, offset);
    }

    static inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      return _atomic_xchg_at_func(new_value, base, offset);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type func_t;
    static func_t _arraycopy_func;

    static bool arraycopy_init(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_ARRAYCOPY>::resolve_barrier();
      _arraycopy_func = function;
      return function(src_obj, dst_obj, src, dst, length);
    }

    static inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
      return _arraycopy_func(src_obj, dst_obj, src, dst, length);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_CLONE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_CLONE>::type func_t;
    static func_t _clone_func;

    static void clone_init(oop src, oop dst, size_t size) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_CLONE>::resolve_barrier();
      _clone_func = function;
      function(src, dst, size);
    }

    static inline void clone(oop src, oop dst, size_t size) {
      _clone_func(src, dst, size);
    }
  };

  template <DecoratorSet decorators, typename T>
  struct RuntimeDispatch<decorators, T, BARRIER_RESOLVE>: AllStatic {
    typedef typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type func_t;
    static func_t _resolve_func;

    static oop resolve_init(oop obj) {
      func_t function = BarrierResolver<decorators, func_t, BARRIER_RESOLVE>::resolve_barrier();
      _resolve_func = function;
      return function(obj);
    }

    static inline oop resolve(oop obj) {
      return _resolve_func(obj);
    }
  };

  // Initialize the function pointers to point to the resolving function.
  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE>::_store_func = &store_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_STORE_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::_store_at_func = &store_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD>::_load_func = &load_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_LOAD_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::_load_at_func = &load_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::_atomic_cmpxchg_func = &atomic_cmpxchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::_atomic_cmpxchg_at_func = &atomic_cmpxchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::_atomic_xchg_func = &atomic_xchg_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ATOMIC_XCHG_AT>::type
  RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::_atomic_xchg_at_func = &atomic_xchg_at_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_ARRAYCOPY>::type
  RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::_arraycopy_func = &arraycopy_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_CLONE>::type
  RuntimeDispatch<decorators, T, BARRIER_CLONE>::_clone_func = &clone_init;

  template <DecoratorSet decorators, typename T>
  typename AccessFunction<decorators, T, BARRIER_RESOLVE>::type
  RuntimeDispatch<decorators, T, BARRIER_RESOLVE>::_resolve_func = &resolve_init;

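  // Note that each function pointer above starts out pointing at its *_init resolver,
  // so the concrete BarrierSet only needs to be known by the time the first such
  // dispatched access is actually performed (see the assert in BarrierResolver above).
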
  // Step 3: Pre-runtime dispatching.
  // The PreRuntimeDispatch class is responsible for filtering the barrier strength
  // decorators. That is, for AS_RAW, it hardwires the accesses without a runtime
  // dispatch point. If hardwiring is not possible, the access instead goes through
  // the runtime dispatch of Step 4.
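  //
  // For example (an illustrative summary of the overloads below, not additional
  // behavior): a store with AS_RAW whose address type already settles the compressed
  // oops question is routed straight to RawAccessBarrier; a primitive access in a
  // build without primitive GC barriers has AS_RAW added and takes the same path;
  // everything else falls through to RuntimeDispatch (Step 4).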
  struct PreRuntimeDispatch: AllStatic {
    template<DecoratorSet decorators>
    struct CanHardwireRaw: public IntegralConstant<
      bool,
      !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // primitive access
      !HasDecorator<decorators, INTERNAL_CONVERT_COMPRESSED_OOP>::value || // don't care about compressed oops (oop* address)
      HasDecorator<decorators, INTERNAL_RT_USE_COMPRESSED_OOPS>::value> // we can infer we use compressed oops (narrowOop* address)
    {};

    static const DecoratorSet convert_compressed_oops = INTERNAL_RT_USE_COMPRESSED_OOPS | INTERNAL_CONVERT_COMPRESSED_OOP;

    template<DecoratorSet decorators>
    static bool is_hardwired_primitive() {
      return !HasDecorator<decorators, INTERNAL_BT_BARRIER_ON_PRIMITIVES>::value &&
             !HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value;
    }
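
    // For instance (illustration only): a jint load has INTERNAL_VALUE_IS_OOP unset,
    // so in a build where no included barrier set requests barriers on primitives,
    // is_hardwired_primitive() is true and the access never reaches Step 4. An oop
    // access through a narrowOop* address already carries
    // INTERNAL_RT_USE_COMPRESSED_OOPS, so CanHardwireRaw holds for it as well.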

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        Raw::oop_store(addr, value);
      } else {
        Raw::store(addr, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value>::type
    store(void* addr, T value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store(void* addr, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store<expanded_decorators>(addr, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE>::store(addr, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      store<decorators>(field_addr(base, offset), value);
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    store_at(oop base, ptrdiff_t offset, T value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, value);
      } else {
        RuntimeDispatch<decorators, T, BARRIER_STORE_AT>::store_at(base, offset, value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::template oop_load<T>(addr);
      } else {
        return Raw::template load<T>(addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    load(void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load(void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load<expanded_decorators, T>(addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD>::load(addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      return load<decorators, T>(field_addr(base, offset));
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    load_at(oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::load_at<expanded_decorators, T>(base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_LOAD_AT>::load_at(base, offset);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_cmpxchg(new_value, addr, compare_value);
      } else {
        return Raw::atomic_cmpxchg(new_value, addr, compare_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg(T new_value, void* addr, T compare_value) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG>::atomic_cmpxchg(new_value, addr, compare_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      return atomic_cmpxchg<decorators>(new_value, field_addr(base, offset), compare_value);
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_cmpxchg_at<expanded_decorators>(new_value, base, offset, compare_value);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_CMPXCHG_AT>::atomic_cmpxchg_at(new_value, base, offset, compare_value);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(T new_value, void* addr) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value) {
        return Raw::oop_atomic_xchg(new_value, addr);
      } else {
        return Raw::atomic_xchg(new_value, addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value && !CanHardwireRaw<decorators>::value, T>::type
    atomic_xchg(T new_value, void* addr) {
      if (UseCompressedOops) {
        const DecoratorSet expanded_decorators = decorators | convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
      } else {
        const DecoratorSet expanded_decorators = decorators & ~convert_compressed_oops;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg(T new_value, void* addr) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG>::atomic_xchg(new_value, addr);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      return atomic_xchg<decorators>(new_value, field_addr(base, offset));
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, T>::type
    atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_value, base, offset);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ATOMIC_XCHG_AT>::atomic_xchg_at(new_value, base, offset);
      }
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value, bool>::type
    arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      return Raw::arraycopy(src, dst, length);
    }

    template <DecoratorSet decorators, typename T>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value, bool>::type
    arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T* dst, size_t length) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      if (is_hardwired_primitive<decorators>()) {
        const DecoratorSet expanded_decorators = decorators | AS_RAW;
        return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj, src, dst, length);
      } else {
        return RuntimeDispatch<decorators, T, BARRIER_ARRAYCOPY>::arraycopy(src_obj, dst_obj, src, dst, length);
      }
    }

    template <DecoratorSet decorators>
    inline static typename EnableIf<
      HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      Raw::clone(src, dst, size);
    }

    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, AS_RAW>::value>::type
    clone(oop src, oop dst, size_t size) {
      RuntimeDispatch<decorators, oop, BARRIER_CLONE>::clone(src, dst, size);
    }

    template <DecoratorSet decorators>
    inline static typename EnableIf<
      HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
    resolve(oop obj) {
      typedef RawAccessBarrier<decorators & RAW_DECORATOR_MASK> Raw;
      return Raw::resolve(obj);
    }

    template <DecoratorSet decorators>
    inline static typename EnableIf<
      !HasDecorator<decorators, INTERNAL_BT_TO_SPACE_INVARIANT>::value, oop>::type
    resolve(oop obj) {
      return RuntimeDispatch<decorators, oop, BARRIER_RESOLVE>::resolve(obj);
    }
  };

  // This class adds implied decorators that follow from the decorator rules,
  // for example a default reference strength and default memory ordering
  // semantics.
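  //
  // As a worked example (following the rules below, not adding new ones): for an
  // on-heap oop load with no explicit decorators, DecoratorFixup picks
  // ON_STRONG_OOP_REF (no reference strength given), MO_UNORDERED (no memory
  // ordering given) and AS_NORMAL (no barrier strength given), and finally ors in
  // BT_BUILDTIME_DECORATORS.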
  template <DecoratorSet input_decorators>
  struct DecoratorFixup: AllStatic {
    // If no reference strength has been picked, then strong will be picked
    static const DecoratorSet ref_strength_default = input_decorators |
      (((ON_DECORATOR_MASK & input_decorators) == 0 && (INTERNAL_VALUE_IS_OOP & input_decorators) != 0) ?
       ON_STRONG_OOP_REF : INTERNAL_EMPTY);
    // If no memory ordering has been picked, unordered will be picked
    static const DecoratorSet memory_ordering_default = ref_strength_default |
      ((MO_DECORATOR_MASK & ref_strength_default) == 0 ? MO_UNORDERED : INTERNAL_EMPTY);
    // If no barrier strength has been picked, normal will be used
    static const DecoratorSet barrier_strength_default = memory_ordering_default |
      ((AS_DECORATOR_MASK & memory_ordering_default) == 0 ? AS_NORMAL : INTERNAL_EMPTY);
    // Heap array accesses imply it is a heap access
    static const DecoratorSet heap_array_is_in_heap = barrier_strength_default |
      ((IN_HEAP_ARRAY & barrier_strength_default) != 0 ? IN_HEAP : INTERNAL_EMPTY);
    static const DecoratorSet conc_root_is_root = heap_array_is_in_heap |
      ((IN_CONCURRENT_ROOT & heap_array_is_in_heap) != 0 ? IN_ROOT : INTERNAL_EMPTY);
    static const DecoratorSet archive_root_is_root = conc_root_is_root |
      ((IN_ARCHIVE_ROOT & conc_root_is_root) != 0 ? IN_ROOT : INTERNAL_EMPTY);
    static const DecoratorSet value = archive_root_is_root | BT_BUILDTIME_DECORATORS;
  };

  // Step 2: Reduce types.
  // Enforce that for non-oop types, T and P are strictly the same.
  // P is the type of the address and T is the type of the values.
  // For oop types, it is allowed to pass T in {narrowOop, oop} and
  // P in {narrowOop, oop, HeapWord*}. The following table shows which
  // combinations are valid (columns are P, rows are T):
  // |           | HeapWord* |   oop   | narrowOop |
  // |   oop     |  rt-comp  | hw-none |  hw-comp  |
  // | narrowOop |     x     |    x    |  hw-none  |
  //
  // x means not allowed
  // rt-comp means it must be checked at runtime whether the oop is compressed.
  // hw-none means it is statically known the oop will not be compressed.
  // hw-comp means it is statically known the oop will be compressed.
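  //
  // For example (mirroring the overloads below): storing an oop through a HeapWord*
  // address only adds INTERNAL_CONVERT_COMPRESSED_OOP, so whether to compress is
  // decided at runtime from UseCompressedOops (rt-comp), while storing through a
  // narrowOop* address also adds INTERNAL_RT_USE_COMPRESSED_OOPS, hardwiring the
  // compressed case (hw-comp).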

  template <DecoratorSet decorators, typename T>
  inline void store_reduce_types(T* addr, T value) {
    PreRuntimeDispatch::store<decorators>(addr, value);
  }

  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  template <DecoratorSet decorators>
  inline void store_reduce_types(narrowOop* addr, narrowOop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  template <DecoratorSet decorators>
  inline void store_reduce_types(HeapWord* addr, oop value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    PreRuntimeDispatch::store<expanded_decorators>(addr, value);
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_reduce_types(T new_value, T* addr, T compare_value) {
    return PreRuntimeDispatch::atomic_cmpxchg<decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(oop new_value, narrowOop* addr, oop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline narrowOop atomic_cmpxchg_reduce_types(narrowOop new_value, narrowOop* addr, narrowOop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators>
  inline oop atomic_cmpxchg_reduce_types(oop new_value,
                                         HeapWord* addr,
                                         oop compare_value) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_cmpxchg<expanded_decorators>(new_value, addr, compare_value);
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_reduce_types(T new_value, T* addr) {
    const DecoratorSet expanded_decorators = decorators;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(oop new_value, narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline narrowOop atomic_xchg_reduce_types(narrowOop new_value, narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators>
  inline oop atomic_xchg_reduce_types(oop new_value, HeapWord* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::atomic_xchg<expanded_decorators>(new_value, addr);
  }

  template <DecoratorSet decorators, typename T>
  inline T load_reduce_types(T* addr) {
    return PreRuntimeDispatch::load<decorators, T>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline typename OopOrNarrowOop<T>::type load_reduce_types(narrowOop* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP |
                                             INTERNAL_RT_USE_COMPRESSED_OOPS;
    return PreRuntimeDispatch::load<expanded_decorators, typename OopOrNarrowOop<T>::type>(addr);
  }

  template <DecoratorSet decorators, typename T>
  inline oop load_reduce_types(HeapWord* addr) {
    const DecoratorSet expanded_decorators = decorators | INTERNAL_CONVERT_COMPRESSED_OOP;
    return PreRuntimeDispatch::load<expanded_decorators, oop>(addr);
  }

  // Step 1: Set default decorators. This step remembers if a type was volatile
  // and then sets the MO_VOLATILE decorator by default. Otherwise, a default
  // memory ordering is set for the access, and the implied decorator rules
  // are applied to select sensible defaults for decorators that have not been
  // explicitly set. For example, the default object referent strength is set to strong.
  // This step also decays the types passed in (e.g. getting rid of CV qualifiers
  // and references from the types) and performs some verification that the
  // passed-in types make sense.
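  //
  // For instance (a consequence of the rules above, shown for illustration): loading
  // through a 'volatile jint*' without any MO_* decorator expands to MO_VOLATILE,
  // whereas an explicit MO_RELAXED decorator on the same address is left untouched.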

  template <DecoratorSet decorators, typename T>
  static void verify_types() {
    // If this fails to compile, then you have sent in something that is
    // not recognized as a valid primitive type to a primitive Access function.
    STATIC_ASSERT((HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value || // oops have already been validated
                   (IsPointer<T>::value || IsIntegral<T>::value) ||
                    IsFloatingPoint<T>::value)); // not allowed primitive type
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline void store(P* addr, T value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT decayed_value = value;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_VOLATILE by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_VOLATILE | decorators) : decorators>::value;
    store_reduce_types<expanded_decorators>(const_cast<DecayedP*>(addr), decayed_value);
  }

  template <DecoratorSet decorators, typename T>
  inline void store_at(oop base, ptrdiff_t offset, T value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT decayed_value = value;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
    PreRuntimeDispatch::store_at<expanded_decorators>(base, offset, decayed_value);
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline T load(P* addr) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // If a volatile address is passed in but no memory ordering decorator,
    // set the memory ordering to MO_VOLATILE by default.
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (IsVolatile<P>::value && !HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_VOLATILE | decorators) : decorators>::value;
    return load_reduce_types<expanded_decorators, DecayedT>(const_cast<DecayedP*>(addr));
  }

  template <DecoratorSet decorators, typename T>
  inline T load_at(oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    typedef typename Conditional<HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value,
                                 typename OopOrNarrowOop<T>::type,
                                 typename Decay<T>::type>::type DecayedT;
    // Expand the decorators (figure out sensible defaults)
    // Potentially remember if we need compressed oop awareness
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
    return PreRuntimeDispatch::load_at<expanded_decorators, DecayedT>(base, offset);
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_cmpxchg(T new_value, P* addr, T compare_value) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    return atomic_cmpxchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                            const_cast<DecayedP*>(addr),
                                                            compare_decayed_value);
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_cmpxchg_at(T new_value, oop base, ptrdiff_t offset, T compare_value) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    DecayedT compare_decayed_value = compare_value;
    // Determine default memory ordering
    const DecoratorSet expanded_decorators = DecoratorFixup<
      (!HasDecorator<decorators, MO_DECORATOR_MASK>::value) ?
      (MO_SEQ_CST | decorators) : decorators>::value;
    // Potentially remember that we need compressed oop awareness
    const DecoratorSet final_decorators = expanded_decorators |
                                          (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                           INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY);
    return PreRuntimeDispatch::atomic_cmpxchg_at<final_decorators>(new_decayed_value, base,
                                                                   offset, compare_decayed_value);
  }

  template <DecoratorSet decorators, typename P, typename T>
  inline T atomic_xchg(T new_value, P* addr) {
    verify_types<decorators, T>();
    typedef typename Decay<P>::type DecayedP;
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST>::value;
    return atomic_xchg_reduce_types<expanded_decorators>(new_decayed_value,
                                                         const_cast<DecayedP*>(addr));
  }

  template <DecoratorSet decorators, typename T>
  inline T atomic_xchg_at(T new_value, oop base, ptrdiff_t offset) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    DecayedT new_decayed_value = new_value;
    // atomic_xchg is only available in SEQ_CST flavour.
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | MO_SEQ_CST |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
    return PreRuntimeDispatch::atomic_xchg_at<expanded_decorators>(new_decayed_value, base, offset);
  }

  template <DecoratorSet decorators, typename T>
  inline bool arraycopy(arrayOop src_obj, arrayOop dst_obj, T *src, T *dst, size_t length) {
    verify_types<decorators, T>();
    typedef typename Decay<T>::type DecayedT;
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators | IN_HEAP_ARRAY | IN_HEAP |
                                             (HasDecorator<decorators, INTERNAL_VALUE_IS_OOP>::value ?
                                              INTERNAL_CONVERT_COMPRESSED_OOP : INTERNAL_EMPTY)>::value;
    return PreRuntimeDispatch::arraycopy<expanded_decorators>(src_obj, dst_obj,
                                                              const_cast<DecayedT*>(src),
                                                              const_cast<DecayedT*>(dst),
                                                              length);
  }

  template <DecoratorSet decorators>
  inline void clone(oop src, oop dst, size_t size) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    PreRuntimeDispatch::clone<expanded_decorators>(src, dst, size);
  }

  template <DecoratorSet decorators>
  inline oop resolve(oop obj) {
    const DecoratorSet expanded_decorators = DecoratorFixup<decorators>::value;
    return PreRuntimeDispatch::resolve<expanded_decorators>(obj);
  }
}

template <DecoratorSet decorators>
template <DecoratorSet expected_decorators>
void Access<decorators>::verify_decorators() {
  STATIC_ASSERT((~expected_decorators & decorators) == 0); // unexpected decorator used
  const DecoratorSet barrier_strength_decorators = decorators & AS_DECORATOR_MASK;
  STATIC_ASSERT(barrier_strength_decorators == 0 || ( // make sure barrier strength decorators are disjoint if set
    (barrier_strength_decorators ^ AS_NO_KEEPALIVE) == 0 ||
    (barrier_strength_decorators ^ AS_DEST_NOT_INITIALIZED) == 0 ||
    (barrier_strength_decorators ^ AS_RAW) == 0 ||
    (barrier_strength_decorators ^ AS_NORMAL) == 0
  ));
  const DecoratorSet ref_strength_decorators = decorators & ON_DECORATOR_MASK;
  STATIC_ASSERT(ref_strength_decorators == 0 || ( // make sure ref strength decorators are disjoint if set
    (ref_strength_decorators ^ ON_STRONG_OOP_REF) == 0 ||
    (ref_strength_decorators ^ ON_WEAK_OOP_REF) == 0 ||
    (ref_strength_decorators ^ ON_PHANTOM_OOP_REF) == 0 ||
    (ref_strength_decorators ^ ON_UNKNOWN_OOP_REF) == 0
  ));
  const DecoratorSet memory_ordering_decorators = decorators & MO_DECORATOR_MASK;
  STATIC_ASSERT(memory_ordering_decorators == 0 || ( // make sure memory ordering decorators are disjoint if set
    (memory_ordering_decorators ^ MO_UNORDERED) == 0 ||
    (memory_ordering_decorators ^ MO_VOLATILE) == 0 ||
    (memory_ordering_decorators ^ MO_RELAXED) == 0 ||
    (memory_ordering_decorators ^ MO_ACQUIRE) == 0 ||
    (memory_ordering_decorators ^ MO_RELEASE) == 0 ||
    (memory_ordering_decorators ^ MO_SEQ_CST) == 0
  ));
  const DecoratorSet location_decorators = decorators & IN_DECORATOR_MASK;
  STATIC_ASSERT(location_decorators == 0 || ( // make sure location decorators are disjoint if set
    (location_decorators ^ IN_ROOT) == 0 ||
    (location_decorators ^ IN_HEAP) == 0 ||
    (location_decorators ^ (IN_HEAP | IN_HEAP_ARRAY)) == 0 ||
    (location_decorators ^ (IN_ROOT | IN_CONCURRENT_ROOT)) == 0 ||
    (location_decorators ^ (IN_ROOT | IN_ARCHIVE_ROOT)) == 0
  ));
}

#endif // SHARE_VM_RUNTIME_ACCESS_INLINE_HPP