/*
 * Copyright (c) 2003, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ORDERACCESS_HPP
#define SHARE_VM_RUNTIME_ORDERACCESS_HPP

#include "memory/allocation.hpp"

//                Memory Access Ordering Model
//
// This interface is based on the JSR-133 Cookbook for Compiler Writers.
//
// In the following, the terms 'previous', 'subsequent', 'before',
// 'after', 'preceding' and 'succeeding' refer to program order.  The
// terms 'down' and 'below' refer to forward load or store motion
// relative to program order, while 'up' and 'above' refer to backward
// motion.
//
// We define four primitive memory barrier operations.
//
// LoadLoad:   Load1(s); LoadLoad; Load2
//
// Ensures that Load1 completes (obtains the value it loads from memory)
// before Load2 and any subsequent load operations.  Loads before Load1
// may *not* float below Load2 and any subsequent load operations.
//
// StoreStore: Store1(s); StoreStore; Store2
//
// Ensures that Store1 completes (the effect on memory of Store1 is made
// visible to other processors) before Store2 and any subsequent store
// operations.  Stores before Store1 may *not* float below Store2 and any
// subsequent store operations.
//
// LoadStore:  Load1(s); LoadStore; Store2
//
// Ensures that Load1 completes before Store2 and any subsequent store
// operations.  Loads before Load1 may *not* float below Store2 and any
// subsequent store operations.
//
// StoreLoad:  Store1(s); StoreLoad; Load2
//
// Ensures that Store1 completes before Load2 and any subsequent load
// operations.  Stores before Store1 may *not* float below Load2 and any
// subsequent load operations.
//
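// These primitives map directly to the loadload(), storestore(),
// loadstore() and storeload() members declared below. For example
// (a sketch; _x and _y are hypothetical volatile jint fields):
//
//   jint a = _x;              // Load1
//   OrderAccess::loadload();  // no later load may be reordered above Load1
//   jint b = _y;              // Load2
//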
// We define two further barriers: acquire and release.
//
// Conceptually, acquire/release semantics form unidirectional and
// asynchronous barriers w.r.t. a synchronizing load(X) and store(X) pair.
// They should always be used in pairs to publish (release store) and
// access (load acquire) some implicitly understood shared data between
// threads in a relatively cheap fashion not requiring storeload. If not
// used in such a pair, it is advised to use a membar instead:
// acquire/release only make sense as pairs.
//
// T1: access_shared_data
// T1: ]release
// T1: (...)
// T1: store(X)
//
// T2: load(X)
// T2: (...)
// T2: acquire[
// T2: access_shared_data
//
// It is guaranteed that if T2: load(X) synchronizes with (observes the
// value written by) T1: store(X), then the memory accesses before the T1:
// ]release happen before the memory accesses after the T2: acquire[.
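//
// For example, publishing with the bound operations declared below might
// look like this (a sketch; _data and _published are hypothetical fields,
// with _published a volatile jint):
//
//   // T1 (publisher):
//   _data = 42;                                  // access_shared_data
//   OrderAccess::release_store(&_published, 1);  // ]release + store(X)
//
//   // T2 (consumer):
//   if (OrderAccess::load_acquire(&_published) != 0) {  // load(X) + acquire[
//     jint d = _data;  // guaranteed to observe T1's write of 42
//   }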
//
// Total Store Order (TSO) machines can be seen as machines issuing a
// release store for each store and a load acquire for each load. Therefore
// there is an inherent resemblance between TSO and acquire/release
// semantics. TSO can be seen as an abstract machine where loads are
// executed immediately when encountered (hence no loadload reordering),
// while stores are enqueued in a FIFO queue for asynchronous
// serialization (hence neither storestore nor loadstore reordering).
// The only reordering that happens is storeload, due to the queue
// asynchronously serializing stores (yet in order).
//
// Acquire/release semantics essentially exploit this asynchronicity: when
// the load(X) acquire[ observes the store of ]release store(X), the
// accesses before the release must have happened before the accesses
// after the acquire.
//
// The API offers both stand-alone acquire() and release() as well as bound
// load_acquire() and release_store(). It is guaranteed that these are
// semantically equivalent w.r.t. the defined model. However, since the
// stand-alone acquire()/release() do not know which previous
// load/subsequent store is considered the synchronizing load/store, they
// may be more conservative in implementations. We advise using the bound
// variants whenever possible.
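//
// For example, these two publications are semantically equivalent, but
// the bound form may be cheaper on some platforms (a sketch; _ready is
// a hypothetical volatile jint flag):
//
//   // stand-alone: conservatively orders all prior accesses
//   OrderAccess::release();
//   _ready = 1;
//
//   // bound: the synchronizing store is known
//   OrderAccess::release_store(&_ready, 1);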
//
// Finally, we define a "fence" operation, as a bidirectional barrier.
// It guarantees that any memory access preceding the fence is not
// reordered w.r.t. any memory accesses subsequent to the fence in program
// order. This may be used to prevent sequences of loads from floating up
// above sequences of stores.
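//
// A classic example is Dekker-style synchronization, where each thread
// stores to its own flag and then loads the other thread's flag; without
// the fence() the load could float above the store and both threads
// could enter the critical section (a sketch; _flag1 and _flag2 are
// hypothetical volatile jint fields, initially zero):
//
//   // T1:                        // T2:
//   _flag1 = 1;                   _flag2 = 1;
//   OrderAccess::fence();         OrderAccess::fence();
//   if (_flag2 == 0) {            if (_flag1 == 0) {
//     // critical section           // critical section
//   }                             }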
//
// The following table shows the implementations on some architectures:
//
//                       Constraint     x86          sparc TSO          ppc
// ---------------------------------------------------------------------------
// fence                 LoadStore  |   lock         membar #StoreLoad  sync
//                       StoreStore |   addl 0,(sp)
//                       LoadLoad   |
//                       StoreLoad
//
// release               LoadStore  |                                   lwsync
//                       StoreStore
//
// acquire               LoadLoad   |                                   lwsync
//                       LoadStore
//
// release_store                        <store>      <store>            lwsync
//                                                                      <store>
//
// release_store_fence                  xchg         <store>            lwsync
//                                                   membar #StoreLoad  <store>
//                                                                      sync
//
//
// load_acquire                         <load>       <load>             <load>
//                                                                      lwsync
//
// Ordering a load relative to preceding stores requires a StoreLoad,
// which implies a membar #StoreLoad between the store and load under
// sparc-TSO. On x86, we use an explicitly locked add.
//
// Conventional usage is to issue a load_acquire for ordered loads.  Use
// release_store for ordered stores when you care only that prior stores
// are visible before the release_store, but don't care exactly when the
// store associated with the release_store becomes visible.  Use
// release_store_fence to update values like the thread state, where we
// don't want the current thread to continue until all our prior memory
// accesses (including the new thread state) are visible to other threads.
// This is equivalent to the volatile semantics of the Java Memory Model.
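//
// For instance, a thread-state transition might be written as (a sketch;
// _thread_state is a hypothetical volatile jint field):
//
//   // publish the new state; do not continue until it, and all prior
//   // memory accesses, are visible to other threads
//   OrderAccess::release_store_fence(&_thread_state, new_state);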
//
//                    C++ Volatile Semantics
//
// C++ volatile semantics prevent compiler reordering between
// volatile memory accesses. However, reordering between non-volatile
// and volatile memory accesses is in general undefined. For compiler
// reordering constraints taking non-volatile memory accesses into
// consideration, a compiler barrier has to be used instead.  Some
// compiler implementations may choose to enforce additional
// constraints beyond those required by the language. Note also that
// neither volatile semantics nor a compiler barrier prevents
// hardware reordering.
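//
// For example, on GCC-compatible toolchains a compiler barrier is
// commonly expressed as an empty asm statement with a memory clobber
// (a toolchain-specific idiom, not part of this interface):
//
//   __asm__ volatile ("" : : : "memory");  // compiler may not cache or
//                                          // reorder memory accesses
//                                          // across this point; no
//                                          // instruction is emitted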
//
//                os::is_MP Considered Redundant
//
// Callers of this interface do not need to test os::is_MP() before
// issuing an operation. The test is taken care of by the implementation
// of the interface (depending on the vm version and platform, the test
// may or may not be actually done by the implementation).
//
//
//                A Note on Memory Ordering and Cache Coherency
//
// Cache coherency and memory ordering are orthogonal concepts, though they
// interact.  E.g., all existing itanium machines are cache-coherent, but
// the hardware can freely reorder loads wrt other loads unless it sees a
// load-acquire instruction.  All existing sparc machines are cache-coherent
// and, unlike itanium, TSO guarantees that the hardware orders loads wrt
// loads and stores, and stores wrt each other.
//
// Consider the implementation of loadload.  *If* your platform *isn't*
// cache-coherent, then loadload must not only prevent hardware load
// instruction reordering, but it must *also* ensure that subsequent
// loads from addresses that could be written by other processors (i.e.,
// that are broadcast by other processors) go all the way to the first
// level of memory shared by those processors and the one issuing
// the loadload.
//
// So if we have an MP that has, say, a per-processor D$ that doesn't see
// writes by other processors, and has a shared E$ that does, the loadload
// barrier would have to make sure that either
//
// 1. cache lines in the issuing processor's D$ that contained data from
// addresses that could be written by other processors are invalidated, so
// subsequent loads from those addresses go to the E$ (it could do this
// by tagging such cache lines as 'shared', though how to tell the hardware
// to do the tagging is an interesting problem), or
//
// 2. there never are such cache lines in the issuing processor's D$, which
// means all references to shared data (however identified: see above)
// bypass the D$ (i.e., are satisfied from the E$).
//
// If your machine doesn't have an E$, substitute 'main memory' for 'E$'.
//
// Either of these alternatives is a pain, so no current machine we know of
// has incoherent caches.
//
// If loadload didn't have these properties, the store-release sequence for
// publishing a shared data structure wouldn't work, because a processor
// trying to read data newly published by another processor might go to
// its own incoherent caches to satisfy the read instead of to the newly
// written shared memory.
//
//
//                NOTE WELL!!
//
//                A Note on MutexLocker and Friends
//
// See mutexLocker.hpp.  We assume throughout the VM that the constructors
// of MutexLocker and friends do a fence, a lock and an acquire *in that
// order*, and that their destructors do a release and an unlock, in *that*
// order.  If their implementations change such that these assumptions
// are violated, a whole lot of code will break.
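//
// For example (a sketch; Terminator_lock is one of the VM's mutexes):
//
//   {
//     MutexLocker ml(Terminator_lock);  // fence, lock, acquire
//     // ... read and write data guarded by Terminator_lock ...
//   }                                   // release, unlock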

enum ScopedFenceType {
    X_ACQUIRE
  , RELEASE_X
  , RELEASE_X_FENCE
};

template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
 public:
  void prefix() {}
  void postfix() {}
};

template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
  void *const _field;
 public:
  ScopedFence(void *const field) : _field(field) { prefix(); }
  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};
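
// ScopedFenceGeneral's prefix()/postfix() are no-ops by default; the
// expectation is that platforms specialize them per fence type, and that
// the generalized accessors wrap a plain access in a ScopedFence. A
// sketch of how this might look (the actual definitions live in the
// platform-specific and .inline.hpp files):
//
//   template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
//   template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
//   template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
//   template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence();   }
//
//   template <typename FieldType, ScopedFenceType FenceType>
//   inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
//     ScopedFence<FenceType> f((void*)p);  // prefix() on entry, postfix() on exit
//     store(p, v);
//   }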

class OrderAccess : AllStatic {
 public:
  // barriers
  static void     loadload();
  static void     storestore();
  static void     loadstore();
  static void     storeload();

  static void     acquire();
  static void     release();
  static void     fence();

  // flushes all pending memory accesses, even on remote CPUs
  static void     global_fence();

  static jbyte    load_acquire(volatile jbyte*   p);
  static jshort   load_acquire(volatile jshort*  p);
  static jint     load_acquire(volatile jint*    p);
  static jlong    load_acquire(volatile jlong*   p);
  static jubyte   load_acquire(volatile jubyte*  p);
  static jushort  load_acquire(volatile jushort* p);
  static juint    load_acquire(volatile juint*   p);
  static julong   load_acquire(volatile julong*  p);
  static jfloat   load_acquire(volatile jfloat*  p);
  static jdouble  load_acquire(volatile jdouble* p);

  static intptr_t load_ptr_acquire(volatile intptr_t*   p);
  static void*    load_ptr_acquire(volatile void*       p);
  static void*    load_ptr_acquire(const volatile void* p);

  static void     release_store(volatile jbyte*   p, jbyte   v);
  static void     release_store(volatile jshort*  p, jshort  v);
  static void     release_store(volatile jint*    p, jint    v);
  static void     release_store(volatile jlong*   p, jlong   v);
  static void     release_store(volatile jubyte*  p, jubyte  v);
  static void     release_store(volatile jushort* p, jushort v);
  static void     release_store(volatile juint*   p, juint   v);
  static void     release_store(volatile julong*  p, julong  v);
  static void     release_store(volatile jfloat*  p, jfloat  v);
  static void     release_store(volatile jdouble* p, jdouble v);

  static void     release_store_ptr(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr(volatile void*     p, void*    v);

  static void     release_store_fence(volatile jbyte*   p, jbyte   v);
  static void     release_store_fence(volatile jshort*  p, jshort  v);
  static void     release_store_fence(volatile jint*    p, jint    v);
  static void     release_store_fence(volatile jlong*   p, jlong   v);
  static void     release_store_fence(volatile jubyte*  p, jubyte  v);
  static void     release_store_fence(volatile jushort* p, jushort v);
  static void     release_store_fence(volatile juint*   p, juint   v);
  static void     release_store_fence(volatile julong*  p, julong  v);
  static void     release_store_fence(volatile jfloat*  p, jfloat  v);
  static void     release_store_fence(volatile jdouble* p, jdouble v);

  static void     release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr_fence(volatile void*     p, void*    v);

 private:
  // This is a helper that invokes the StubRoutines::fence_entry()
  // routine if it exists. It should only be used by platforms that
  // don't have another way to do the inline assembly.
  static void StubRoutines_fence();

  // Give platforms a variation point to specialize.
  template<typename T> static T    specialized_load_acquire       (volatile T* p     );
  template<typename T> static void specialized_release_store      (volatile T* p, T v);
  template<typename T> static void specialized_release_store_fence(volatile T* p, T v);

  template<bool DummyFlag>
  static void specialized_global_fence();
  static void general_global_fence();

  template<typename FieldType, ScopedFenceType FenceType>
  static void ordered_store(volatile FieldType* p, FieldType v);

  template<typename FieldType, ScopedFenceType FenceType>
  static FieldType ordered_load(volatile FieldType* p);

  static void    store(volatile jbyte*   p, jbyte   v);
  static void    store(volatile jshort*  p, jshort  v);
  static void    store(volatile jint*    p, jint    v);
  static void    store(volatile jlong*   p, jlong   v);
  static void    store(volatile jdouble* p, jdouble v);
  static void    store(volatile jfloat*  p, jfloat  v);

  static jbyte   load (volatile jbyte*   p);
  static jshort  load (volatile jshort*  p);
  static jint    load (volatile jint*    p);
  static jlong   load (volatile jlong*   p);
  static jdouble load (volatile jdouble* p);
  static jfloat  load (volatile jfloat*  p);

  // The following store_fence methods are deprecated and will be removed
  // when all repos conform to the new generalized OrderAccess.
  static void    store_fence(jbyte*   p, jbyte   v);
  static void    store_fence(jshort*  p, jshort  v);
  static void    store_fence(jint*    p, jint    v);
  static void    store_fence(jlong*   p, jlong   v);
  static void    store_fence(jubyte*  p, jubyte  v);
  static void    store_fence(jushort* p, jushort v);
  static void    store_fence(juint*   p, juint   v);
  static void    store_fence(julong*  p, julong  v);
  static void    store_fence(jfloat*  p, jfloat  v);
  static void    store_fence(jdouble* p, jdouble v);

  static void    store_ptr_fence(intptr_t* p, intptr_t v);
  static void    store_ptr_fence(void**    p, void*    v);
};

#endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP