src/share/vm/runtime/orderAccess.hpp

  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_RUNTIME_ORDERACCESS_HPP
  26 #define SHARE_VM_RUNTIME_ORDERACCESS_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 
  30 //                Memory Access Ordering Model
  31 //
  32 // This interface is based on the JSR-133 Cookbook for Compiler Writers
  33 // and on the IA64 memory model.  It is the dynamic equivalent of the
  34 // C/C++ volatile specifier.  I.e., volatility restricts compile-time
  35 // memory access reordering in a way similar to what we want to occur
  36 // at runtime.
  37 //
  38 // In the following, the terms 'previous', 'subsequent', 'before',
  39 // 'after', 'preceding' and 'succeeding' refer to program order.  The
  40 // terms 'down' and 'below' refer to forward load or store motion
  41 // relative to program order, while 'up' and 'above' refer to backward
  42 // motion.
  43 //
  44 //
  45 // We define four primitive memory barrier operations.
  46 //
  47 // LoadLoad:   Load1(s); LoadLoad; Load2
  48 //
  49 // Ensures that Load1 completes (obtains the value it loads from memory)
  50 // before Load2 and any subsequent load operations.  Loads before Load1
  51 // may *not* float below Load2 and any subsequent load operations.
  52 //
  53 // StoreStore: Store1(s); StoreStore; Store2
  54 //
  55 // Ensures that Store1 completes (the effect on memory of Store1 is made
  56 // visible to other processors) before Store2 and any subsequent store
  57 // operations.  Stores before Store1 may *not* float below Store2 and any
  58 // subsequent store operations.
  59 //
  60 // LoadStore:  Load1(s); LoadStore; Store2
  61 //
  62 // Ensures that Load1 completes before Store2 and any subsequent store
  63 // operations.  Loads before Load1 may *not* float below Store2 and any
  64 // subsequent store operations.
  65 //
  66 // StoreLoad:  Store1(s); StoreLoad; Load2
  67 //
  68 // Ensures that Store1 completes before Load2 and any subsequent load
  69 // operations.  Stores before Store1 may *not* float below Load2 and any
  70 // subsequent load operations.
  71 //
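// As a purely illustrative sketch (the fields 'payload' and 'published' are
// hypothetical, not part of this interface), the raw barriers above can order
// a simple publication handshake:
//
//   Writer: payload = v;
//   Writer: OrderAccess::storestore();   // payload visible no later than the flag
//   Writer: published = 1;
//
//   Reader: if (published) {
//   Reader:   OrderAccess::loadload();   // flag read before the payload read
//   Reader:   use(payload);
//   Reader: }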

  72 //
  73 // We define two further operations, 'release' and 'acquire'.  They are
  74 // mirror images of each other.
  75 //
  76 // Execution by a processor of release makes the effect of all memory
  77 // accesses issued by it previous to the release visible to all
  78 // processors *before* the release completes.  The effect of subsequent
  79 // memory accesses issued by it *may* be made visible *before* the
  80 // release.  I.e., subsequent memory accesses may float above the
  81 // release, but prior ones may not float below it.
  82 //
  83 // Execution by a processor of acquire makes the effect of all memory
  84 // accesses issued by it subsequent to the acquire visible to all
  85 // processors *after* the acquire completes.  The effect of prior memory
  86 // accesses issued by it *may* be made visible *after* the acquire.
  87 // I.e., prior memory accesses may float below the acquire, but
  88 // subsequent ones may not float above it.
  89 //
  90 // Finally, we define a 'fence' operation, which conceptually is a
  91 // release combined with an acquire.  In the real world these operations
  92 // require one or more machine instructions which can float above and
  93 // below the release or acquire, so we usually can't just issue the
  94 // release-acquire back-to-back.  All machines we know of implement some
  95 // sort of memory fence instruction.
  96 //
  97 //
  98 // The standalone implementations of release and acquire need an associated
  99 // dummy volatile store or load respectively.  To avoid redundant operations,
 100 // we can define the composite operators: 'release_store', 'store_fence' and
 101 // 'load_acquire'.  Here's a summary of the machine instructions corresponding
 102 // to each operation.
 103 //
 104 //               sparc RMO             ia64             x86
 105 // ---------------------------------------------------------------------
 106 // fence         membar #LoadStore |   mf               lock addl 0,(sp)
 107 //                      #StoreStore |
 108 //                      #LoadLoad |
 109 //                      #StoreLoad
 110 //
 111 // release       membar #LoadStore |   st.rel [sp]=r0   movl $0,<dummy>
 112 //                      #StoreStore
 113 //               st %g0,[]
 114 //
 115 // acquire       ld [%sp],%g0          ld.acq <r>=[sp]  movl (sp),<r>
 116 //               membar #LoadLoad |
 117 //                      #LoadStore
 118 //
 119 // release_store membar #LoadStore |   st.rel           <store>
 120 //                      #StoreStore
 121 //               st
 122 //
 123 // store_fence   st                    st               lock xchg
 124 //               fence                 mf
 125 //
 126 // load_acquire  ld                    ld.acq           <load>
 127 //               membar #LoadLoad |
 128 //                      #LoadStore
 129 //
 130 // Using only release_store and load_acquire, we can implement the
 131 // following ordered sequences.
 132 //
 133 // 1. load, load   == load_acquire,  load
 134 //                 or load_acquire,  load_acquire
 135 // 2. load, store  == load,          release_store
 136 //                 or load_acquire,  store
 137 //                 or load_acquire,  release_store
 138 // 3. store, store == store,         release_store
 139 //                 or release_store, release_store
 140 //
 141 // These require no membar instructions for sparc-TSO and no extra
 142 // instructions for ia64.
 143 //
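// For example, sequence 3 above can be written (with hypothetical fields,
// purely for illustration) by making the second store a release_store:
//
//   _value = v;                               // first store
//   OrderAccess::release_store(&_count, n);   // second store; _value is visible no later
//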
 144 // Ordering a load relative to preceding stores requires a store_fence,
 145 // which implies a membar #StoreLoad between the store and load under
 146 // sparc-TSO.  A fence is required by ia64.  On x86, we use locked xchg.

 147 //
 148 // 4. store, load  == store_fence, load
 149 //
 150 // Use store_fence to make sure all stores done in an 'interesting'
 151 // region are made visible prior to both subsequent loads and stores.
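//
// As a hedged sketch of that pattern (the flags '_my_claim' and '_other_claim'
// are hypothetical), store_fence lets a thread publish its own claim before
// examining the other side's:
//
//   OrderAccess::store_fence(&_my_claim, 1);   // store, then fence
//   if (_other_claim == 0) {
//     // because of the fence, it is impossible for both threads to read 0 here
//   }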
 152 //
 153 // Conventional usage is to issue a load_acquire for ordered loads.  Use
 154 // release_store for ordered stores when you care only that prior stores
 155 // are visible before the release_store, but don't care exactly when the
 156 // store associated with the release_store becomes visible.  Use
 157 // release_store_fence to update values like the thread state, where we
 158 // don't want the current thread to continue until all our prior memory
 159 // accesses (including the new thread state) are visible to other threads.
 160 //
 161 //
 162 //                C++ Volatility
 163 //
 164 // C++ guarantees ordering at operations termed 'sequence points' (defined
 165 // to be volatile accesses and calls to library I/O functions).  'Side
 166 // effects' (defined as volatile accesses, calls to library I/O functions
 167 // and object modification) previous to a sequence point must be visible
 168 // at that sequence point.  See the C++ standard, section 1.9, titled
 169 // "Program Execution".  This means that all barrier implementations,
 170 // including standalone loadload, storestore, loadstore, storeload, acquire
 171 // and release must include a sequence point, usually via a volatile memory
 172 // access.  Other ways to guarantee a sequence point are, e.g., use of
 173 // indirect calls and linux's __asm__ volatile.
 174 // Note: as of 6973570, we have replaced the originally static "dummy" field
 175 // (see above) by a volatile store to the stack. All of the versions of the
 176 // compilers that we currently use (SunStudio, gcc and VC++) respect the
 177 // semantics of volatile here. If you build HotSpot using other
 178 // compilers, you may need to verify that no compiler reordering occurs
 179 // across the sequence point represented by the volatile access.
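//
// As a rough sketch of that approach (simplified; the real definitions live
// in the platform-specific .inline.hpp files, not in this header), a
// compiler-level release can be obtained from a volatile store to the stack,
// which constitutes a sequence point:
//
//   inline void OrderAccess::release() {
//     // volatile store to a local; also avoids sharing a cache line across threads
//     volatile jint local_dummy = 0;
//   }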
 180 //
 181 //
 182 //                os::is_MP Considered Redundant
 183 //
 184 // Callers of this interface do not need to test os::is_MP() before
 185 // issuing an operation. The test is taken care of by the implementation
 186 // of the interface (depending on the vm version and platform, the test
 187 // may or may not be actually done by the implementation).
 188 //
 189 //
 190 //                A Note on Memory Ordering and Cache Coherency
 191 //
 192 // Cache coherency and memory ordering are orthogonal concepts, though they
 193 // interact.  E.g., all existing itanium machines are cache-coherent, but
 194 // the hardware can freely reorder loads wrt other loads unless it sees a
 195 // load-acquire instruction.  All existing sparc machines are cache-coherent
 196 // and, unlike itanium, TSO guarantees that the hardware orders loads wrt
  197 // loads and stores, and stores wrt each other.
 198 //
 199 // Consider the implementation of loadload.  *If* your platform *isn't*


 223 // Either of these alternatives is a pain, so no current machine we know of
 224 // has incoherent caches.
 225 //
 226 // If loadload didn't have these properties, the store-release sequence for
 227 // publishing a shared data structure wouldn't work, because a processor
 228 // trying to read data newly published by another processor might go to
 229 // its own incoherent caches to satisfy the read instead of to the newly
 230 // written shared memory.
 231 //
 232 //
 233 //                NOTE WELL!!
 234 //
 235 //                A Note on MutexLocker and Friends
 236 //
 237 // See mutexLocker.hpp.  We assume throughout the VM that MutexLocker's
 238 // and friends' constructors do a fence, a lock and an acquire *in that
 239 // order*.  And that their destructors do a release and unlock, in *that*
 240 // order.  If their implementations change such that these assumptions
 241 // are violated, a whole lot of code will break.
  242 
 243 class OrderAccess : AllStatic {
 244  public:

 245   static void     loadload();
 246   static void     storestore();
 247   static void     loadstore();
 248   static void     storeload();
 249 
 250   static void     acquire();
 251   static void     release();
 252   static void     fence();
 253 
 254   static jbyte    load_acquire(volatile jbyte*   p);
 255   static jshort   load_acquire(volatile jshort*  p);
 256   static jint     load_acquire(volatile jint*    p);
 257   static jlong    load_acquire(volatile jlong*   p);
 258   static jubyte   load_acquire(volatile jubyte*  p);
 259   static jushort  load_acquire(volatile jushort* p);
 260   static juint    load_acquire(volatile juint*   p);
 261   static julong   load_acquire(volatile julong*  p);
 262   static jfloat   load_acquire(volatile jfloat*  p);
 263   static jdouble  load_acquire(volatile jdouble* p);
 264 
 265   static intptr_t load_ptr_acquire(volatile intptr_t*   p);
 266   static void*    load_ptr_acquire(volatile void*       p);
 267   static void*    load_ptr_acquire(const volatile void* p);
 268 
 269   static void     release_store(volatile jbyte*   p, jbyte   v);
 270   static void     release_store(volatile jshort*  p, jshort  v);
 271   static void     release_store(volatile jint*    p, jint    v);
 272   static void     release_store(volatile jlong*   p, jlong   v);
 273   static void     release_store(volatile jubyte*  p, jubyte  v);
 274   static void     release_store(volatile jushort* p, jushort v);
 275   static void     release_store(volatile juint*   p, juint   v);
 276   static void     release_store(volatile julong*  p, julong  v);
 277   static void     release_store(volatile jfloat*  p, jfloat  v);
 278   static void     release_store(volatile jdouble* p, jdouble v);
 279 
 280   static void     release_store_ptr(volatile intptr_t* p, intptr_t v);
 281   static void     release_store_ptr(volatile void*     p, void*    v);
 282 
 283   static void     store_fence(jbyte*   p, jbyte   v);
 284   static void     store_fence(jshort*  p, jshort  v);
 285   static void     store_fence(jint*    p, jint    v);
 286   static void     store_fence(jlong*   p, jlong   v);
 287   static void     store_fence(jubyte*  p, jubyte  v);
 288   static void     store_fence(jushort* p, jushort v);
 289   static void     store_fence(juint*   p, juint   v);
 290   static void     store_fence(julong*  p, julong  v);
 291   static void     store_fence(jfloat*  p, jfloat  v);
 292   static void     store_fence(jdouble* p, jdouble v);
 293 
 294   static void     store_ptr_fence(intptr_t* p, intptr_t v);
 295   static void     store_ptr_fence(void**    p, void*    v);
 296 
 297   static void     release_store_fence(volatile jbyte*   p, jbyte   v);
 298   static void     release_store_fence(volatile jshort*  p, jshort  v);
 299   static void     release_store_fence(volatile jint*    p, jint    v);
 300   static void     release_store_fence(volatile jlong*   p, jlong   v);
 301   static void     release_store_fence(volatile jubyte*  p, jubyte  v);
 302   static void     release_store_fence(volatile jushort* p, jushort v);
 303   static void     release_store_fence(volatile juint*   p, juint   v);
 304   static void     release_store_fence(volatile julong*  p, julong  v);
 305   static void     release_store_fence(volatile jfloat*  p, jfloat  v);
 306   static void     release_store_fence(volatile jdouble* p, jdouble v);
 307 
 308   static void     release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
 309   static void     release_store_ptr_fence(volatile void*     p, void*    v);
 310 
 311  private:
 312   // This is a helper that invokes the StubRoutines::fence_entry()
  313   // routine if it exists. It should only be used by platforms that
 314   // don't have another way to do the inline assembly.
  315   static void StubRoutines_fence();
 316 };
 317 
 318 #endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP


  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_RUNTIME_ORDERACCESS_HPP
  26 #define SHARE_VM_RUNTIME_ORDERACCESS_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 
  30 //                Memory Access Ordering Model
  31 //
  32 // This interface is based on the JSR-133 Cookbook for Compiler Writers.
  33 //
  34 // In the following, the terms 'previous', 'subsequent', 'before',
  35 // 'after', 'preceding' and 'succeeding' refer to program order.  The
  36 // terms 'down' and 'below' refer to forward load or store motion
  37 // relative to program order, while 'up' and 'above' refer to backward
  38 // motion.
  39 //
  40 //
  41 // We define four primitive memory barrier operations.
  42 //
  43 // LoadLoad:   Load1(s); LoadLoad; Load2
  44 //
  45 // Ensures that Load1 completes (obtains the value it loads from memory)
  46 // before Load2 and any subsequent load operations.  Loads before Load1
  47 // may *not* float below Load2 and any subsequent load operations.
  48 //
  49 // StoreStore: Store1(s); StoreStore; Store2
  50 //
  51 // Ensures that Store1 completes (the effect on memory of Store1 is made
  52 // visible to other processors) before Store2 and any subsequent store
  53 // operations.  Stores before Store1 may *not* float below Store2 and any
  54 // subsequent store operations.
  55 //
  56 // LoadStore:  Load1(s); LoadStore; Store2
  57 //
  58 // Ensures that Load1 completes before Store2 and any subsequent store
  59 // operations.  Loads before Load1 may *not* float below Store2 and any
  60 // subsequent store operations.
  61 //
  62 // StoreLoad:  Store1(s); StoreLoad; Load2
  63 //
  64 // Ensures that Store1 completes before Load2 and any subsequent load
  65 // operations.  Stores before Store1 may *not* float below Load2 and any
  66 // subsequent load operations.
  67 //
  68 // We define two further barriers: acquire and release.
  69 //
  70 // Conceptually, acquire/release semantics form unidirectional and
  71 // asynchronous barriers w.r.t. a synchronizing load(X) and store(X) pair.
  72 // They should always be used in pairs to publish (release store) and
  73 // access (load acquire) some implicitly understood shared data between
  74 // threads in a relatively cheap fashion not requiring storeload. If not
  75 // used in such a pair, it is advised to use a membar instead:
  76 // acquire/release only make sense as pairs.
  77 //
  78 // T1: access_shared_data
  79 // T1: ]release
  80 // T1: (...)
  81 // T1: store(X)
  82 //
  83 // T2: load(X)
  84 // T2: (...)
  85 // T2: acquire[
  86 // T2: access_shared_data
  87 //
  88 // It is guaranteed that if T2: load(X) synchronizes with (observes the
  89 // value written by) T1: store(X), then the memory accesses before the T1:
  90 // ]release happen before the memory accesses after the T2: acquire[.
  91 //
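// Expressed with the joined operators declared below (the names 'data' and
// '_flag' are hypothetical, purely for illustration), the same pairing reads:
//
//   T1: data = v;                                    // access_shared_data
//   T1: OrderAccess::release_store(&_flag, 1);       // ]release + store(X)
//
//   T2: if (OrderAccess::load_acquire(&_flag) != 0)  // load(X) + acquire[
//   T2:   use(data);                                 // access_shared_data
//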
  92 // Total Store Order (TSO) machines can be seen as machines issuing a
  93 // release store for each store and a load acquire for each load. Therefore
  94 // there is an inherent resemblance between TSO and acquire/release
  95 // semantics. TSO can be seen as an abstract machine in which loads are
  96 // executed immediately when encountered (hence loadload reordering cannot
  97 // happen), while stores are enqueued in a FIFO queue
  98 // for asynchronous serialization (hence neither storestore nor loadstore
  99 // reordering can happen). The only reordering that does happen is storeload,
  100 // due to the queue asynchronously serializing stores (yet in order).
 101 //
  102 // Acquire/release semantics essentially exploit this asynchronicity: when
  103 // the load(X) acquire[ observes the store of ]release store(X), the
  104 // accesses before the release must have happened before the accesses after
  105 // the acquire.
 106 //
 107 // The API offers both stand-alone acquire() and release() as well as joined
 108 // load_acquire() and release_store(). It is guaranteed that these are
 109 // semantically equivalent w.r.t. the defined model. However, since
  110 // stand-alone acquire()/release() do not know which previous
  111 // load/subsequent store is considered the synchronizing load/store, they
  112 // may be implemented more conservatively. We advise using the joined
 113 // variants whenever possible.
 114 //
  115 // Finally, we define a "fence" operation as a bidirectional barrier.
 116 // It guarantees that any memory access preceding the fence is not
 117 // reordered w.r.t. any memory accesses subsequent to the fence in program
 118 // order. This may be used to prevent sequences of loads from floating up
 119 // above sequences of stores.
 120 //
 121 // The following table shows the implementations on some architectures:
 122 //
 123 //                       Constraint     x86          sparc              ppc
 124 // ---------------------------------------------------------------------------
 125 // fence                 LoadStore  |   lock         membar #StoreLoad  sync
 126 //                       StoreStore |   addl 0,(sp)
 127 //                       LoadLoad   |
 128 //                       StoreLoad
 129 //
 130 // release               LoadStore  |                                   lwsync
 131 //                       StoreStore
 132 //
 133 // acquire               LoadLoad   |                                   lwsync
 134 //                       LoadStore
 135 //
 136 // release_store                        <store>      <store>            lwsync
 137 //                                                                      <store>
 138 //
 139 // release_store_fence                  xchg         <store>            lwsync
 140 //                                                   membar #StoreLoad  <store>
 141 //                                                                      sync
  142 //
 143 //
 144 // load_acquire                         <load>       <load>             <load>
 145 //                                                                      lwsync
 146 //
 147 // Ordering a load relative to preceding stores requires a fence,
 148 // which implies a membar #StoreLoad between the store and load under
  149 // sparc-TSO.  A fence is also required on x86, where we use an explicitly
  150 // locked add.
 151 //
 152 // 4. store, load  <= is constrained by => store, fence, load
 153 //
 154 // Use store, fence to make sure all stores done in an 'interesting'
 155 // region are made visible prior to both subsequent loads and stores.
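//
// Schematically (hypothetical fields, for illustration only):
//
//   _my_claim = 1;               // store into the 'interesting' region
//   OrderAccess::fence();        // the store is visible before ...
//   jint seen = _other_claim;    // ... this and any later load or store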
 156 //
 157 // Conventional usage is to issue a load_acquire for ordered loads.  Use
 158 // release_store for ordered stores when you care only that prior stores
 159 // are visible before the release_store, but don't care exactly when the
 160 // store associated with the release_store becomes visible.  Use
 161 // release_store_fence to update values like the thread state, where we
 162 // don't want the current thread to continue until all our prior memory
 163 // accesses (including the new thread state) are visible to other threads.
 164 // This is equivalent to the volatile semantics of the Java Memory Model.
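//
// For example (a sketch only; '_thread_state' and '_thread_in_vm' stand in
// for whatever field and value a caller actually uses):
//
//   OrderAccess::release_store_fence(&_thread_state, _thread_in_vm);
//   // all prior accesses, including the state change itself, are visible to
//   // other threads before this thread continues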
 165 //
 166 //
 167 //                os::is_MP Considered Redundant
 168 //
 169 // Callers of this interface do not need to test os::is_MP() before
 170 // issuing an operation. The test is taken care of by the implementation
 171 // of the interface (depending on the vm version and platform, the test
 172 // may or may not be actually done by the implementation).
 173 //
 174 //
 175 //                A Note on Memory Ordering and Cache Coherency
 176 //
 177 // Cache coherency and memory ordering are orthogonal concepts, though they
 178 // interact.  E.g., all existing itanium machines are cache-coherent, but
 179 // the hardware can freely reorder loads wrt other loads unless it sees a
 180 // load-acquire instruction.  All existing sparc machines are cache-coherent
 181 // and, unlike itanium, TSO guarantees that the hardware orders loads wrt
  182 // loads and stores, and stores wrt each other.
 183 //
 184 // Consider the implementation of loadload.  *If* your platform *isn't*


 208 // Either of these alternatives is a pain, so no current machine we know of
 209 // has incoherent caches.
 210 //
 211 // If loadload didn't have these properties, the store-release sequence for
 212 // publishing a shared data structure wouldn't work, because a processor
 213 // trying to read data newly published by another processor might go to
 214 // its own incoherent caches to satisfy the read instead of to the newly
 215 // written shared memory.
 216 //
 217 //
 218 //                NOTE WELL!!
 219 //
 220 //                A Note on MutexLocker and Friends
 221 //
 222 // See mutexLocker.hpp.  We assume throughout the VM that MutexLocker's
 223 // and friends' constructors do a fence, a lock and an acquire *in that
 224 // order*.  And that their destructors do a release and unlock, in *that*
 225 // order.  If their implementations change such that these assumptions
 226 // are violated, a whole lot of code will break.
 227 
 228 enum ScopedFenceType {
 229     X_ACQUIRE
 230   , RELEASE_X
 231   , RELEASE_X_FENCE
 232 };
 233 
 234 template <ScopedFenceType T>
 235 class ScopedFenceGeneral: public StackObj {
 236  public:
 237   void prefix() {}
 238   void postfix() {}
 239 };
 240 
 241 template <ScopedFenceType T>
 242 class ScopedFence : public ScopedFenceGeneral<T> {
 243   void *const _field;
 244  public:
 245   ScopedFence(void *const field) : _field(field) { prefix(); }
 246   ~ScopedFence() { postfix(); }
 247   void prefix() { ScopedFenceGeneral<T>::prefix(); }
 248   void postfix() { ScopedFenceGeneral<T>::postfix(); }
 249 };
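// A sketch of how these helpers might be tied together (illustrative only; the
// real specializations and definitions live in the .inline.hpp implementation
// files, not in this header):
//
//   template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
//   template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
//   template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
//   template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }
//
//   template <typename FieldType, ScopedFenceType FenceType>
//   inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
//     ScopedFence<FenceType> f((void*)p);   // prefix() now, postfix() when f goes out of scope
//     store(p, v);
//   }
//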
 250 
 251 // This class implements some fences for different platforms and specializes
 252 // the methods of its superclass using template specialization for improved performance.
 253 class OrderAccess : AllStatic {
 254  public:
 255   // barriers
 256   static void     loadload();
 257   static void     storestore();
 258   static void     loadstore();
 259   static void     storeload();
 260 
 261   static void     acquire();
 262   static void     release();
 263   static void     fence();
 264 
 265   static jbyte    load_acquire(volatile jbyte*   p);
 266   static jshort   load_acquire(volatile jshort*  p);
 267   static jint     load_acquire(volatile jint*    p);
 268   static jlong    load_acquire(volatile jlong*   p);
 269   static jubyte   load_acquire(volatile jubyte*  p);
 270   static jushort  load_acquire(volatile jushort* p);
 271   static juint    load_acquire(volatile juint*   p);
 272   static julong   load_acquire(volatile julong*  p);
 273   static jfloat   load_acquire(volatile jfloat*  p);
 274   static jdouble  load_acquire(volatile jdouble* p);
 275 
 276   static intptr_t load_ptr_acquire(volatile intptr_t*   p);
 277   static void*    load_ptr_acquire(volatile void*       p);
 278   static void*    load_ptr_acquire(const volatile void* p);
 279 
 280   static void     release_store(volatile jbyte*   p, jbyte   v);
 281   static void     release_store(volatile jshort*  p, jshort  v);
 282   static void     release_store(volatile jint*    p, jint    v);
 283   static void     release_store(volatile jlong*   p, jlong   v);
 284   static void     release_store(volatile jubyte*  p, jubyte  v);
 285   static void     release_store(volatile jushort* p, jushort v);
 286   static void     release_store(volatile juint*   p, juint   v);
 287   static void     release_store(volatile julong*  p, julong  v);
 288   static void     release_store(volatile jfloat*  p, jfloat  v);
 289   static void     release_store(volatile jdouble* p, jdouble v);
 290 
 291   static void     release_store_ptr(volatile intptr_t* p, intptr_t v);
 292   static void     release_store_ptr(volatile void*     p, void*    v);
  293 
 294   static void     release_store_fence(volatile jbyte*   p, jbyte   v);
 295   static void     release_store_fence(volatile jshort*  p, jshort  v);
 296   static void     release_store_fence(volatile jint*    p, jint    v);
 297   static void     release_store_fence(volatile jlong*   p, jlong   v);
 298   static void     release_store_fence(volatile jubyte*  p, jubyte  v);
 299   static void     release_store_fence(volatile jushort* p, jushort v);
 300   static void     release_store_fence(volatile juint*   p, juint   v);
 301   static void     release_store_fence(volatile julong*  p, julong  v);
 302   static void     release_store_fence(volatile jfloat*  p, jfloat  v);
 303   static void     release_store_fence(volatile jdouble* p, jdouble v);
 304 
 305   static void     release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
 306   static void     release_store_ptr_fence(volatile void*     p, void*    v);
 307 
 308  private:
 309   // This is a helper that invokes the StubRoutines::fence_entry()
  310   // routine if it exists. It should only be used by platforms that
 311   // don't have another way to do the inline assembly.
 312   static void StubRoutines_fence();
 313 
  314   // Give platforms a variation point to specialize.
 315   template<typename T> static T    specialized_load_acquire       (volatile T* p     );
 316   template<typename T> static void specialized_release_store      (volatile T* p, T v);
 317   template<typename T> static void specialized_release_store_fence(volatile T* p, T v);
 318 
 319   template<typename FieldType, ScopedFenceType FenceType>
 320   static void ordered_store(volatile FieldType* p, FieldType v);
 321 
 322   template<typename FieldType, ScopedFenceType FenceType>
 323   static FieldType ordered_load(volatile FieldType* p);
 324 
 325   static void    store(volatile jbyte*   p, jbyte   v);
 326   static void    store(volatile jshort*  p, jshort  v);
 327   static void    store(volatile jint*    p, jint    v);
 328   static void    store(volatile jlong*   p, jlong   v);
 329   static void    store(volatile jdouble* p, jdouble v);
 330   static void    store(volatile jfloat*  p, jfloat  v);
 331 
 332   static jbyte   load (volatile jbyte*   p);
 333   static jshort  load (volatile jshort*  p);
 334   static jint    load (volatile jint*    p);
 335   static jlong   load (volatile jlong*   p);
 336   static jdouble load (volatile jdouble* p);
 337   static jfloat  load (volatile jfloat*  p);
 338 
 339   // The following store_fence methods are deprecated and will be removed
 340   // when all repos conform to the new generalized OrderAccess.
 341   static void    store_fence(jbyte*   p, jbyte   v);
 342   static void    store_fence(jshort*  p, jshort  v);
 343   static void    store_fence(jint*    p, jint    v);
 344   static void    store_fence(jlong*   p, jlong   v);
 345   static void    store_fence(jubyte*  p, jubyte  v);
 346   static void    store_fence(jushort* p, jushort v);
 347   static void    store_fence(juint*   p, juint   v);
 348   static void    store_fence(julong*  p, julong  v);
 349   static void    store_fence(jfloat*  p, jfloat  v);
 350   static void    store_fence(jdouble* p, jdouble v);
 351 
 352   static void    store_ptr_fence(intptr_t* p, intptr_t v);
 353   static void    store_ptr_fence(void**    p, void*    v);
 354 };
 355 
 356 #endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP