/*
 * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ORDERACCESS_HPP
#define SHARE_VM_RUNTIME_ORDERACCESS_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"

// Memory Access Ordering Model
//
// This interface is based on the JSR-133 Cookbook for Compiler Writers.
//
// In the following, the terms 'previous', 'subsequent', 'before',
// 'after', 'preceding' and 'succeeding' refer to program order. The
// terms 'down' and 'below' refer to forward load or store motion
// relative to program order, while 'up' and 'above' refer to backward
// motion.
//
// We define four primitive memory barrier operations.
//
// LoadLoad:   Load1(s); LoadLoad; Load2
//
// Ensures that Load1 completes (obtains the value it loads from memory)
// before Load2 and any subsequent load operations. Loads before Load1
// may *not* float below Load2 and any subsequent load operations.
//
// StoreStore: Store1(s); StoreStore; Store2
//
// Ensures that Store1 completes (the effect on memory of Store1 is made
// visible to other processors) before Store2 and any subsequent store
// operations. Stores before Store1 may *not* float below Store2 and any
// subsequent store operations.
//
// LoadStore:  Load1(s); LoadStore; Store2
//
// Ensures that Load1 completes before Store2 and any subsequent store
// operations. Loads before Load1 may *not* float below Store2 and any
// subsequent store operations.
//
// StoreLoad:  Store1(s); StoreLoad; Load2
//
// Ensures that Store1 completes before Load2 and any subsequent load
// operations. Stores before Store1 may *not* float below Load2 and any
// subsequent load operations.
//
// We define two further barriers: acquire and release.
//
// Conceptually, acquire/release semantics form unidirectional and
// asynchronous barriers w.r.t. a synchronizing load(X) and store(X) pair.
// They should always be used in pairs to publish (release store) and
// access (load acquire) some implicitly understood shared data between
// threads in a relatively cheap fashion not requiring storeload. If not
// used in such a pair, it is advised to use a membar instead:
// acquire/release only make sense as pairs.
//
// T1: access_shared_data
// T1: ]release
// T1: (...)
// T1: store(X)
//
// T2: load(X)
// T2: (...)
// T2: acquire[
// T2: access_shared_data
//
// It is guaranteed that if T2: load(X) synchronizes with (observes the
// value written by) T1: store(X), then the memory accesses before the T1:
// ]release happen before the memory accesses after the T2: acquire[.
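//
// As a concrete sketch of this pattern in terms of the bound operations
// declared below (the fields _ready and _data are hypothetical and not
// declared in this file):
//
//   T1: _data = 42;                                   // access_shared_data
//   T1: OrderAccess::release_store(&_ready, 1);       // ]release; store(X)
//
//   T2: if (OrderAccess::load_acquire(&_ready) == 1)  // load(X); acquire[
//   T2:   assert(_data == 42, "published by T1");     // access_shared_data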
//
// Total Store Order (TSO) machines can be seen as machines issuing a
// release store for each store and a load acquire for each load. Therefore
// there is an inherent resemblance between TSO and acquire/release
// semantics. TSO can be seen as an abstract machine where loads are
// executed immediately when encountered (hence no loadload reordering),
// but stores are enqueued in a FIFO queue for asynchronous serialization
// (hence neither storestore nor loadstore reordering). The only reordering
// that happens is storeload, due to the queue asynchronously serializing
// stores (yet in order).
//
// Acquire/release semantics essentially exploit this asynchronicity: when
// the load(X) acquire[ observes the store of ]release store(X), the
// accesses before the release must have happened before the accesses after
// the acquire.
//
// The API offers both stand-alone acquire() and release() as well as bound
// load_acquire() and release_store(). It is guaranteed that these are
// semantically equivalent w.r.t. the defined model. However, since the
// stand-alone acquire()/release() do not know which previous
// load/subsequent store is considered the synchronizing load/store, they
// may be more conservative in implementations. We advise using the bound
// variants whenever possible.
//
// Finally, we define a "fence" operation, as a bidirectional barrier.
// It guarantees that any memory access preceding the fence is not
// reordered w.r.t. any memory access subsequent to the fence in program
// order. This may be used to prevent sequences of loads from floating up
// above sequences of stores.
//
// The following table shows the implementations on some architectures:
//
//                       Constraint     x86          sparc TSO          ppc
// ---------------------------------------------------------------------------
// fence                 LoadStore  |   lock         membar #StoreLoad  sync
//                       StoreStore |   addl 0,(sp)
//                       LoadLoad   |
//                       StoreLoad
//
// release               LoadStore  |                                   lwsync
//                       StoreStore |
//
// acquire               LoadLoad   |                                   lwsync
//                       LoadStore  |
//
// release_store                        <store>      <store>            lwsync
//                                                                      <store>
//
// release_store_fence                  xchg         <store>            lwsync
//                                                   membar #StoreLoad  <store>
//                                                                      sync
//
//
// load_acquire                         <load>       <load>             <load>
//                                                                      lwsync
//
// Ordering a load relative to preceding stores requires a StoreLoad,
// which implies a membar #StoreLoad between the store and load under
// sparc-TSO. On x86, we use an explicitly locked add.
//
// Conventional usage is to issue a load_acquire for ordered loads. Use
// release_store for ordered stores when you care only that prior stores
// are visible before the release_store, but don't care exactly when the
// store associated with the release_store becomes visible. Use
// release_store_fence to update values like the thread state, where we
// don't want the current thread to continue until all our prior memory
// accesses (including the new thread state) are visible to other threads.
// This is equivalent to the volatile semantics of the Java Memory Model.
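//
// For example (a sketch; the field and value names are hypothetical, and
// the VM's actual thread-state transitions live elsewhere):
//
//   // Publish a new thread state and stall until it is visible to other
//   // threads, ordered after all of this thread's prior accesses:
//   OrderAccess::release_store_fence(&_thread_state, _thread_blocked);
//
//   // When only the ordering of prior stores matters, the cheaper
//   // release_store suffices:
//   OrderAccess::release_store(&_counter, v);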
//
// C++ Volatile Semantics
//
// C++ volatile semantics prevent compiler re-ordering between
// volatile memory accesses. However, reordering between non-volatile
// and volatile memory accesses is in general undefined. For compiler
// reordering constraints taking non-volatile memory accesses into
// consideration, a compiler barrier has to be used instead. Some
// compiler implementations may choose to enforce additional
// constraints beyond those required by the language. Note also that
// neither volatile semantics nor a compiler barrier prevents
// hardware reordering.
//
// os::is_MP Considered Redundant
//
// Callers of this interface do not need to test os::is_MP() before
// issuing an operation. The test is taken care of by the implementation
// of the interface (depending on the vm version and platform, the test
// may or may not be actually done by the implementation).
//
//
// A Note on Memory Ordering and Cache Coherency
//
// Cache coherency and memory ordering are orthogonal concepts, though they
// interact. E.g., all existing itanium machines are cache-coherent, but
// the hardware can freely reorder loads wrt other loads unless it sees a
// load-acquire instruction. All existing sparc machines are cache-coherent
// and, unlike itanium, TSO guarantees that the hardware orders loads wrt
// loads and stores, and stores wrt each other.
//
// Consider the implementation of loadload. *If* your platform *isn't*
// cache-coherent, then loadload must not only prevent hardware load
// instruction reordering, but it must *also* ensure that subsequent
// loads from addresses that could be written by other processors (i.e.,
// that are broadcast by other processors) go all the way to the first
// level of memory shared by those processors and the one issuing
// the loadload.
//
// So if we have an MP that has, say, a per-processor D$ that doesn't see
// writes by other processors, and has a shared E$ that does, the loadload
// barrier would have to make sure that either
//
// 1. cache lines in the issuing processor's D$ that contained data from
// addresses that could be written by other processors are invalidated, so
// subsequent loads from those addresses go to the E$ (it could do this
// by tagging such cache lines as 'shared', though how to tell the hardware
// to do the tagging is an interesting problem), or
//
// 2. there never are such cache lines in the issuing processor's D$, which
// means all references to shared data (however identified: see above)
// bypass the D$ (i.e., are satisfied from the E$).
//
// If your machine doesn't have an E$, substitute 'main memory' for 'E$'.
//
// Either of these alternatives is a pain, so no current machine we know of
// has incoherent caches.
//
// If loadload didn't have these properties, the store-release sequence for
// publishing a shared data structure wouldn't work, because a processor
// trying to read data newly published by another processor might go to
// its own incoherent caches to satisfy the read instead of to the newly
// written shared memory.
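//
// That store-release publishing sequence looks roughly like this (a
// sketch; Foo, _published and _x are assumed names, not declarations in
// this file):
//
//   T1: Foo* f = new Foo();
//   T1: f->_x = 42;                                     // initialize payload
//   T1: OrderAccess::release_store(&_published, f);     // publish pointer
//
//   T2: Foo* f = OrderAccess::load_acquire(&_published);
//   T2: if (f != NULL) { int x = f->_x; }               // must observe 42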
//
// NOTE WELL!!
//
// A Note on MutexLocker and Friends
//
// See mutexLocker.hpp. We assume throughout the VM that MutexLocker's
// and friends' constructors do a fence, a lock and an acquire *in that
// order*. And that their destructors do a release and unlock, in *that*
// order. If their implementations change such that these assumptions
// are violated, a whole lot of code will break.

enum ScopedFenceType {
    X_ACQUIRE
  , RELEASE_X
  , RELEASE_X_FENCE
};

template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
 public:
  void prefix() {}
  void postfix() {}
};

template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
  void *const _field;
 public:
  ScopedFence(void *const field) : _field(field) { prefix(); }
  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};

class OrderAccess : private Atomic {
 public:
  // barriers
  static void loadload();
  static void storestore();
  static void loadstore();
  static void storeload();

  static void acquire();
  static void release();
  static void fence();

  template <typename T>
  static T load_acquire(const volatile T* p);

  template <typename T, typename D>
  static void release_store(volatile D* p, T v);

  template <typename T, typename D>
  static void release_store_fence(volatile D* p, T v);

 private:
  // This is a helper that invokes the StubRoutines::fence_entry()
  // routine if it exists. It should only be used by platforms that
  // don't have another way to do the inline assembly.
  static void StubRoutines_fence();

  // Give platforms a variation point to specialize.
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;

  template<typename FieldType, ScopedFenceType FenceType>
  static void ordered_store(volatile FieldType* p, FieldType v);

  template<typename FieldType, ScopedFenceType FenceType>
  static FieldType ordered_load(const volatile FieldType* p);
};

// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.

template<size_t byte_size, ScopedFenceType type>
struct OrderAccess::PlatformOrderedStore VALUE_OBJ_CLASS_SPEC {
  template <typename T>
  void operator()(T v, volatile T* p) const {
    ordered_store<T, type>(p, v);
  }
};

template<size_t byte_size, ScopedFenceType type>
struct OrderAccess::PlatformOrderedLoad VALUE_OBJ_CLASS_SPEC {
  template <typename T>
  T operator()(const volatile T* p) const {
    return ordered_load<T, type>(p);
  }
};

#endif // SHARE_VM_RUNTIME_ORDERACCESS_HPP