 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_RUNTIME_ORDERACCESS_HPP
#define SHARE_RUNTIME_ORDERACCESS_HPP

#include "memory/allocation.hpp"
#include "runtime/atomic.hpp"
#include "utilities/macros.hpp"

// Memory Access Ordering Model
//
// This interface is based on the JSR-133 Cookbook for Compiler Writers.
//
// In the following, the terms 'previous', 'subsequent', 'before',
// 'after', 'preceding' and 'succeeding' refer to program order. The
// terms 'down' and 'below' refer to forward load or store motion
// relative to program order, while 'up' and 'above' refer to backward
// motion.
//
// We define four primitive memory barrier operations.
//
// LoadLoad:   Load1(s); LoadLoad; Load2
//
// Ensures that Load1 completes (obtains the value it loads from memory)
// before Load2 and any subsequent load operations. Loads before Load1
// may *not* float below Load2 and any subsequent load operations.
//
// StoreStore: Store1(s); StoreStore; Store2
//
// Ensures that Store1 completes (the effect on memory of Store1 is made
// visible to other processors) before Store2 and any subsequent store
// operations. Stores before Store1 may *not* float below Store2 and any
// subsequent store operations.
//
// LoadStore:  Load1(s); LoadStore; Store2
//
// Ensures that Load1 completes before Store2 and any subsequent store
// operations. Loads before Load1 may *not* float below Store2 and any
// subsequent store operations.
//
// StoreLoad:  Store1(s); StoreLoad; Load2
//
// Ensures that Store1 completes before Load2 and any subsequent load
// operations. Stores before Store1 may *not* float below Load2 and any
// subsequent load operations.
//
// Either of these alternatives is a pain, so no current machine we know of
// has incoherent caches.
//
// If loadload didn't have these properties, the store-release sequence for
// publishing a shared data structure wouldn't work, because a processor
// trying to read data newly published by another processor might go to
// its own incoherent caches to satisfy the read instead of to the newly
// written shared memory.
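//
// For example, publishing a payload through a flag (a minimal sketch;
// '_payload', '_published', 'create_payload()' and 'use()' are
// illustrative names only, not VM code):
//
//   // Publisher thread: fully initialize the payload, then set the
//   // flag with a release store so the initialization cannot sink
//   // below the flag store.
//   _payload = create_payload();
//   OrderAccess::release_store(&_published, 1);
//
//   // Reader thread: test the flag with an acquire load so the reads
//   // of '_payload' cannot float above the flag check.
//   if (OrderAccess::load_acquire(&_published) == 1) {
//     use(_payload);    // guaranteed to see the initialized payload
//   }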
//
//
// NOTE WELL!!
//
//                A Note on MutexLocker and Friends
//
// See mutexLocker.hpp. We assume throughout the VM that MutexLocker's
// and friends' constructors do a fence, a lock and an acquire *in that
// order*. And that their destructors do a release and unlock, in *that*
// order. If their implementations change such that these assumptions
// are violated, a whole lot of code will break.
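//
// That is, conceptually (a sketch of the assumed shape only, not the
// actual MutexLocker code; see mutexLocker.hpp for the real thing):
//
//   MutexLocker::MutexLocker(Mutex* m) : _mutex(m) {
//     OrderAccess::fence();      // 1. fence
//     _mutex->lock();            // 2. lock
//     OrderAccess::acquire();    // 3. acquire
//   }
//   MutexLocker::~MutexLocker() {
//     OrderAccess::release();    // 1. release
//     _mutex->unlock();          // 2. unlock
//   }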

// RAII fence helpers. ScopedFenceGeneral provides empty defaults; the
// specializations further below bind prefix()/postfix() to the
// appropriate barrier before and/or after the scoped memory access.
enum ScopedFenceType {
    X_ACQUIRE
  , RELEASE_X
  , RELEASE_X_FENCE
};

template <ScopedFenceType T>
class ScopedFenceGeneral: public StackObj {
 public:
  void prefix() {}
  void postfix() {}
};

template <ScopedFenceType T>
class ScopedFence : public ScopedFenceGeneral<T> {
  void *const _field;
 public:
  ScopedFence(void *const field) : _field(field) { prefix(); }
  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};

class OrderAccess : private Atomic {
 public:
  // barriers
  static void loadload();
  static void storestore();
  static void loadstore();
  static void storeload();

  static void acquire();
  static void release();
  static void fence();

  static void cross_modify_fence();

  template <typename T>
  static T load_acquire(const volatile T* p);

  template <typename T, typename D>
  static void release_store(volatile D* p, T v);

  template <typename T, typename D>
  static void release_store_fence(volatile D* p, T v);

 private:
  // This is a helper that invokes the StubRoutines::fence_entry()
  // routine if it exists. It should only be used by platforms that
  // don't have another way to do the inline assembly.
  static void StubRoutines_fence();

  // Give platforms a variation point to specialize.
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;

  template<typename FieldType, ScopedFenceType FenceType>
  static void ordered_store(volatile FieldType* p, FieldType v);

  template<typename FieldType, ScopedFenceType FenceType>
  static FieldType ordered_load(const volatile FieldType* p);
};

// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.

template<size_t byte_size, ScopedFenceType type>
struct OrderAccess::PlatformOrderedStore {
  template <typename T>
  void operator()(T v, volatile T* p) const {
    ordered_store<T, type>(p, v);
  }
};

template<size_t byte_size, ScopedFenceType type>
struct OrderAccess::PlatformOrderedLoad {
  template <typename T>
  T operator()(const volatile T* p) const {
    return ordered_load<T, type>(p);
  }
};
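
// As an illustration, a GCC-based port might specialize 4-byte acquiring
// loads with a compiler builtin (a hypothetical sketch of what an os_cpu
// header could provide, not an actual port):
//
//   template<>
//   struct OrderAccess::PlatformOrderedLoad<4, X_ACQUIRE> {
//     template <typename T>
//     T operator()(const volatile T* p) const {
//       T data;
//       __atomic_load(const_cast<T*>(p), &data, __ATOMIC_ACQUIRE);
//       return data;
//     }
//   };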

#include OS_CPU_HEADER(orderAccess)

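// Bind the scoped-fence hooks to concrete barriers: an acquiring access
// runs acquire() after it, a releasing access runs release() before it,
// and RELEASE_X_FENCE additionally runs a full fence() afterwards.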
template<> inline void ScopedFenceGeneral<X_ACQUIRE>::postfix()       { OrderAccess::acquire(); }
template<> inline void ScopedFenceGeneral<RELEASE_X>::prefix()        { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::prefix()  { OrderAccess::release(); }
template<> inline void ScopedFenceGeneral<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }

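// Generic fallbacks: the ScopedFence constructor runs the prefix barrier
// before the access and its destructor runs the postfix barrier after it;
// the access itself still goes through Atomic.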
template <typename FieldType, ScopedFenceType FenceType>
inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
  ScopedFence<FenceType> f((void*)p);
  Atomic::store(v, p);
}

template <typename FieldType, ScopedFenceType FenceType>
inline FieldType OrderAccess::ordered_load(const volatile FieldType* p) {
  ScopedFence<FenceType> f((void*)p);
  return Atomic::load(p);
}

template <typename T>
inline T OrderAccess::load_acquire(const volatile T* p) {
  return LoadImpl<T, PlatformOrderedLoad<sizeof(T), X_ACQUIRE> >()(p);
}

template <typename T, typename D>
inline void OrderAccess::release_store(volatile D* p, T v) {
  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X> >()(v, p);
}

template <typename T, typename D>
inline void OrderAccess::release_store_fence(volatile D* p, T v) {
  StoreImpl<T, D, PlatformOrderedStore<sizeof(D), RELEASE_X_FENCE> >()(v, p);
}

#endif // SHARE_RUNTIME_ORDERACCESS_HPP