/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

//                Memory Access Ordering Model
//
// This interface is based on the JSR-133 Cookbook for Compiler Writers
// and on the IA64 memory model.  It is the dynamic equivalent of the
// C/C++ volatile specifier.  I.e., volatility restricts compile-time
// memory access reordering in a way similar to what we want to occur
// at runtime.
//
// In the following, the terms 'previous', 'subsequent', 'before',
// 'after', 'preceding' and 'succeeding' refer to program order.  The
// terms 'down' and 'below' refer to forward load or store motion
// relative to program order, while 'up' and 'above' refer to backward
// motion.
//
//
// We define four primitive memory barrier operations.
//
// LoadLoad:   Load1(s); LoadLoad; Load2
//
// Ensures that Load1 completes (obtains the value it loads from memory)
// before Load2 and any subsequent load operations.  Loads before Load1
// may *not* float below Load2 and any subsequent load operations.
//
// StoreStore: Store1(s); StoreStore; Store2
//
// Ensures that Store1 completes (the effect on memory of Store1 is made
// visible to other processors) before Store2 and any subsequent store
// operations.  Stores before Store1 may *not* float below Store2 and any
// subsequent store operations.
//
// LoadStore:  Load1(s); LoadStore; Store2
//
// Ensures that Load1 completes before Store2 and any subsequent store
// operations.  Loads before Load1 may *not* float below Store2 and any
// subsequent store operations.
//
// StoreLoad:  Store1(s); StoreLoad; Load2
//
// Ensures that Store1 completes before Load2 and any subsequent load
// operations.  Stores before Store1 may *not* float below Load2 and any
// subsequent load operations.
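//
// For example, StoreLoad is what a Dekker-style handshake needs.  A
// minimal sketch (the flags are hypothetical, not part of this
// interface):
//
//   volatile jint flag1 = 0, flag2 = 0;    // shared, initially zero
//
//   // Thread 1                            // Thread 2
//   flag1 = 1;                             flag2 = 1;
//   OrderAccess::storeload();              OrderAccess::storeload();
//   if (flag2 == 0) { /*enter*/ }          if (flag1 == 0) { /*enter*/ }
//
// Without the StoreLoad barrier each thread's load could be satisfied
// before its own store became visible to the other, and both threads
// could observe zero and enter the guarded region at once.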
//
//
// We define two further operations, 'release' and 'acquire'.  They are
// mirror images of each other.
//
// Execution by a processor of release makes the effect of all memory
// accesses issued by it previous to the release visible to all
// processors *before* the release completes.  The effect of subsequent
// memory accesses issued by it *may* be made visible *before* the
// release.  I.e., subsequent memory accesses may float above the
// release, but prior ones may not float below it.
//
// Execution by a processor of acquire makes the effect of all memory
// accesses issued by it subsequent to the acquire visible to all
// processors *after* the acquire completes.  The effect of prior memory
// accesses issued by it *may* be made visible *after* the acquire.
// I.e., prior memory accesses may float below the acquire, but
// subsequent ones may not float above it.
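//
// Release/acquire pairs are the basis of safe publication.  A minimal
// sketch (the fields '_data' and '_published' are hypothetical):
//
//   // Publisher                           // Consumer
//   _data = 42;                            while (_published == 0) ;
//   OrderAccess::release();                OrderAccess::acquire();
//   _published = 1;                        // _data is now visible as 42
//
// The release keeps the store to _data from floating below the store to
// _published; the acquire keeps the consumer's load of _data from
// floating above its load of _published.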
//
// Finally, we define a 'fence' operation, which conceptually is a
// release combined with an acquire.  In the real world these operations
// require one or more machine instructions which can float above and
// below the release or acquire, so we usually can't just issue the
// release-acquire back-to-back.  All machines we know of implement some
// sort of memory fence instruction.
//
//
// The standalone implementations of release and acquire need an associated
// dummy volatile store or load respectively.  To avoid redundant operations,
// we can define the composite operators: 'release_store', 'store_fence' and
// 'load_acquire'.  Here's a summary of the machine instructions corresponding
// to each operation.
//
//               sparc RMO             ia64             x86
// ---------------------------------------------------------------------
// fence         membar #LoadStore |   mf               lock addl 0,(sp)
//                      #StoreStore |
//                      #LoadLoad |
//                      #StoreLoad
//
// release       membar #LoadStore |   st.rel [sp]=r0   movl $0,<dummy>
//                      #StoreStore
//               st %g0,[]
//
// acquire       ld [%sp],%g0          ld.acq <r>=[sp]  movl (sp),<r>
//               membar #LoadLoad |
//                      #LoadStore
//
// release_store membar #LoadStore |   st.rel           <store>
//                      #StoreStore
//               st
//
// store_fence   st                    st               lock xchg
//               fence                 mf
//
// load_acquire  ld                    ld.acq           <load>
//               membar #LoadLoad |
//                      #LoadStore
//
// Using only release_store and load_acquire, we can implement the
// following ordered sequences (sequences 1 and 3 are sketched below).
//
// 1. load, load   == load_acquire,  load
//                 or load_acquire,  load_acquire
// 2. load, store  == load,          release_store
//                 or load_acquire,  store
//                 or load_acquire,  release_store
// 3. store, store == store,         release_store
//                 or release_store, release_store
//
// These require no membar instructions for sparc-TSO and no extra
// instructions for ia64.
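//
// Sequences 1 and 3 together cover publishing a value.  A minimal
// sketch (the fields '_ready' and '_value' are hypothetical):
//
//   // Publisher: plain store, then release_store of a ready flag
//   // (sequence 3: store, release_store).
//   _value = compute();
//   OrderAccess::release_store(&_ready, 1);
//
//   // Consumer: load_acquire of the flag orders the later plain load
//   // (sequence 1: load_acquire, load).
//   if (OrderAccess::load_acquire(&_ready) != 0) {
//     use(_value);
//   }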
//
// Ordering a load relative to preceding stores requires a store_fence,
// which implies a membar #StoreLoad between the store and load under
// sparc-TSO.  A fence is required by ia64.  On x86, we use locked xchg.
//
// 4. store, load  == store_fence, load
//
// Use store_fence to make sure all stores done in an 'interesting'
// region are made visible prior to both subsequent loads and stores.
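//
// A minimal sketch (hypothetical fields again):
//
//   OrderAccess::store_fence(&_claim, my_id);  // store, then fence
//   jint owner = _owner;                       // this load cannot be
//                                              // satisfied before the
//                                              // store is visible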
//
// Conventional usage is to issue a load_acquire for ordered loads.  Use
// release_store for ordered stores when you care only that prior stores
// are visible before the release_store, but don't care exactly when the
// store associated with the release_store becomes visible.  Use
// release_store_fence to update values like the thread state, where we
// don't want the current thread to continue until all our prior memory
// accesses (including the new thread state) are visible to other threads.
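//
// For example, a thread-state transition might be written as below (a
// sketch only; the real transition code lives elsewhere in the VM and
// the cast details vary):
//
//   // Do not let this thread continue until the new state, and all
//   // prior accesses, are visible to other threads.
//   OrderAccess::release_store_fence((volatile jint*)&_thread_state,
//                                    (jint)_thread_in_vm);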
//
//
//                C++ Volatility
//
// C++ guarantees ordering at operations termed 'sequence points' (defined
// to be volatile accesses and calls to library I/O functions).  'Side
// effects' (defined as volatile accesses, calls to library I/O functions
// and object modification) previous to a sequence point must be visible
// at that sequence point.  See the C++ standard, section 1.9, titled
// "Program Execution".  This means that all barrier implementations,
// including standalone loadload, storestore, loadstore, storeload, acquire
// and release must include a sequence point, usually via a volatile memory
// access.  Other ways to guarantee a sequence point are, e.g., use of
// indirect calls and linux's __asm__ volatile.
// Note: as of 6973570, we have replaced the originally static "dummy" field
// (see above) with a volatile store to the stack. All of the versions of the
// compilers that we currently use (SunStudio, gcc and VC++) respect the
// semantics of volatile here. If you build HotSpot using other
// compilers, you may need to verify that no compiler reordering occurs
// across the sequence point represented by the volatile access.
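//
// On a TSO platform the standalone release can therefore be as simple
// as a volatile store to the stack, which is both a sequence point for
// the compiler and sufficient release semantics in hardware.  A sketch
// of that style of implementation (the platform files differ in detail):
//
//   inline void OrderAccess::release() {
//     // A volatile store to a stack-local dummy; stack locations are
//     // private to this thread, so other threads never hit the same
//     // cache line.
//     volatile jint local_dummy = 0;
//   }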
//
//
//                os::is_MP Considered Redundant
//
// Callers of this interface do not need to test os::is_MP() before
// issuing an operation. The test is taken care of by the implementation
// of the interface (depending on the vm version and platform, the test
// may or may not actually be performed by the implementation).
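//
// For example, an x86 implementation might fold the test in like this
// (a sketch in the style of the 32-bit linux_x86 platform file):
//
//   inline void OrderAccess::fence() {
//     if (os::is_MP()) {
//       // A locked add to the stack is often cheaper than mfence.
//       __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
//     }
//   }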
//
//
//                A Note on Memory Ordering and Cache Coherency
//
// Cache coherency and memory ordering are orthogonal concepts, though they
// interact.  E.g., all existing itanium machines are cache-coherent, but
// the hardware can freely reorder loads wrt other loads unless it sees a
// load-acquire instruction.  All existing sparc machines are cache-coherent
// and, unlike itanium, TSO guarantees that the hardware orders loads wrt
// loads and stores, and stores wrt each other.
//
// Consider the implementation of loadload.  *If* your platform *isn't*
// cache-coherent, then loadload must not only prevent hardware load
// instruction reordering, but it must *also* ensure that subsequent
// loads from addresses that could be written by other processors (i.e.,
// that are broadcast by other processors) go all the way to the first
// level of memory shared by those processors and the one issuing
// the loadload.
//
// So if we have an MP that has, say, a per-processor D$ that doesn't see
// writes by other processors, and has a shared E$ that does, the loadload
// barrier would have to make sure that either
//
// 1. cache lines in the issuing processor's D$ that contained data from
// addresses that could be written by other processors are invalidated, so
// subsequent loads from those addresses go to the E$ (it could do this
// by tagging such cache lines as 'shared', though how to tell the hardware
// to do the tagging is an interesting problem), or
//
// 2. there never are such cache lines in the issuing processor's D$, which
// means all references to shared data (however identified: see above)
// bypass the D$ (i.e., are satisfied from the E$).
//
// If your machine doesn't have an E$, substitute 'main memory' for 'E$'.
//
// Either of these alternatives is a pain, so no current machine we know of
// has incoherent caches.
//
// If loadload didn't have these properties, the store-release sequence for
// publishing a shared data structure wouldn't work, because a processor
// trying to read data newly published by another processor might go to
// its own incoherent caches to satisfy the read instead of to the newly
// written shared memory.
//
//
//                NOTE WELL!!
//
//                A Note on MutexLocker and Friends
//
// See mutexLocker.hpp.  We assume throughout the VM that MutexLocker's
// and friends' constructors do a fence, a lock and an acquire *in that
// order*.  And that their destructors do a release and unlock, in *that*
// order.  If their implementations change such that these assumptions
// are violated, a whole lot of code will break.
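//
// In outline, the assumed shape is (a conceptual sketch, *not* the
// actual mutexLocker.hpp code):
//
//   MutexLocker::MutexLocker(Monitor* m) : _mutex(m) {
//     OrderAccess::fence();    // order prior accesses w.r.t. the lock
//     _mutex->lock();
//     OrderAccess::acquire();  // critical-section accesses stay below
//   }
//
//   MutexLocker::~MutexLocker() {
//     OrderAccess::release();  // critical-section accesses stay above
//     _mutex->unlock();
//   }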

class OrderAccess : AllStatic {
 public:
  static void     loadload();
  static void     storestore();
  static void     loadstore();
  static void     storeload();

  static void     acquire();
  static void     release();
  static void     fence();

  static jbyte    load_acquire(volatile jbyte*   p);
  static jshort   load_acquire(volatile jshort*  p);
  static jint     load_acquire(volatile jint*    p);
  static jlong    load_acquire(volatile jlong*   p);
  static jubyte   load_acquire(volatile jubyte*  p);
  static jushort  load_acquire(volatile jushort* p);
  static juint    load_acquire(volatile juint*   p);
  static julong   load_acquire(volatile julong*  p);
  static jfloat   load_acquire(volatile jfloat*  p);
  static jdouble  load_acquire(volatile jdouble* p);

  static intptr_t load_ptr_acquire(volatile intptr_t*   p);
  static void*    load_ptr_acquire(volatile void*       p);
  static void*    load_ptr_acquire(const volatile void* p);

  static void     release_store(volatile jbyte*   p, jbyte   v);
  static void     release_store(volatile jshort*  p, jshort  v);
  static void     release_store(volatile jint*    p, jint    v);
  static void     release_store(volatile jlong*   p, jlong   v);
  static void     release_store(volatile jubyte*  p, jubyte  v);
  static void     release_store(volatile jushort* p, jushort v);
  static void     release_store(volatile juint*   p, juint   v);
  static void     release_store(volatile julong*  p, julong  v);
  static void     release_store(volatile jfloat*  p, jfloat  v);
  static void     release_store(volatile jdouble* p, jdouble v);

  static void     release_store_ptr(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr(volatile void*     p, void*    v);

  static void     store_fence(jbyte*   p, jbyte   v);
  static void     store_fence(jshort*  p, jshort  v);
  static void     store_fence(jint*    p, jint    v);
  static void     store_fence(jlong*   p, jlong   v);
  static void     store_fence(jubyte*  p, jubyte  v);
  static void     store_fence(jushort* p, jushort v);
  static void     store_fence(juint*   p, juint   v);
  static void     store_fence(julong*  p, julong  v);
  static void     store_fence(jfloat*  p, jfloat  v);
  static void     store_fence(jdouble* p, jdouble v);

  static void     store_ptr_fence(intptr_t* p, intptr_t v);
  static void     store_ptr_fence(void**    p, void*    v);

  static void     release_store_fence(volatile jbyte*   p, jbyte   v);
  static void     release_store_fence(volatile jshort*  p, jshort  v);
  static void     release_store_fence(volatile jint*    p, jint    v);
  static void     release_store_fence(volatile jlong*   p, jlong   v);
  static void     release_store_fence(volatile jubyte*  p, jubyte  v);
  static void     release_store_fence(volatile jushort* p, jushort v);
  static void     release_store_fence(volatile juint*   p, juint   v);
  static void     release_store_fence(volatile julong*  p, julong  v);
  static void     release_store_fence(volatile jfloat*  p, jfloat  v);
  static void     release_store_fence(volatile jdouble* p, jdouble v);

  static void     release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
  static void     release_store_ptr_fence(volatile void*     p, void*    v);

 private:
  // This is a helper that invokes the StubRoutines::fence_entry()
  // routine if it exists.  It should only be used by platforms that
  // don't have another way to do the inline assembly.
  static void StubRoutines_fence();
};
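
// Example usage (illustrative only; 'FooTable' and '_table' are
// hypothetical, and casts are elided).  A writer publishes a pointer
// with release_store_ptr; readers pick it up with load_ptr_acquire, so
// everything stored before publication is visible after the acquiring
// load:
//
//   // Writer
//   FooTable* t = new FooTable();
//   t->fill_in();                                  // plain stores
//   OrderAccess::release_store_ptr(&_table, t);    // publish
//
//   // Reader
//   FooTable* t = (FooTable*)OrderAccess::load_ptr_acquire(&_table);
//   if (t != NULL) t->use();                       // contents visible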