// ---- Before: OrderAccess with the explicit *_ptr overloads ----

  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};
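
For orientation, a minimal sketch (not taken from the header; the constructor
arguments and the exact call site are assumptions) of how the RAII ScopedFence
above can bracket a plain store, with the destructor emitting the trailing
barrier:

  template <typename FieldType, ScopedFenceType FenceType>
  inline void OrderAccess::ordered_store(volatile FieldType* p, FieldType v) {
    ScopedFence<FenceType> f;  // hypothetical default ctor; the real one may take arguments
    f.prefix();                // leading barrier for this fence type
    *p = v;                    // the store being ordered
  }                            // 'f' destroyed here: postfix() runs the trailing barrier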

class OrderAccess : private Atomic {
 public:
  // barriers
  static void loadload();
  static void storestore();
  static void loadstore();
  static void storeload();

  static void acquire();
  static void release();
  static void fence();

  template <typename T>
  static T load_acquire(const volatile T* p);

  static intptr_t load_ptr_acquire(const volatile intptr_t* p);
  static void*    load_ptr_acquire(const volatile void*     p);

  template <typename T, typename D>
  static void release_store(volatile D* p, T v);

  static void release_store_ptr(volatile intptr_t* p, intptr_t v);
  static void release_store_ptr(volatile void*     p, void*    v);

  template <typename T, typename D>
  static void release_store_fence(volatile D* p, T v);

  static void release_store_ptr_fence(volatile intptr_t* p, intptr_t v);
  static void release_store_ptr_fence(volatile void*     p, void*    v);

 private:
  // This is a helper that invokes the StubRoutines::fence_entry()
  // routine if it exists. It should only be used by platforms that
  // don't have another way to do the inline assembly.
  static void StubRoutines_fence();

  // Give platforms a variation point to specialize.
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;

  template<typename FieldType, ScopedFenceType FenceType>
  static void ordered_store(volatile FieldType* p, FieldType v);

  template<typename FieldType, ScopedFenceType FenceType>
  static FieldType ordered_load(const volatile FieldType* p);
};
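
As a usage sketch (hypothetical names, not from the source): the *_ptr
overloads exist for the classic pointer-publication pattern, where a writer's
release-store pairs with a reader's load-acquire:

  struct Foo { int x; };
  static volatile intptr_t _published = 0;   // hypothetical published slot

  // Writer: construct the object first; release_store_ptr orders the
  // stores to *f before the store of the pointer itself.
  void publish() {
    Foo* f = new Foo();
    f->x = 42;
    OrderAccess::release_store_ptr(&_published, (intptr_t)f);
  }

  // Reader: load_ptr_acquire orders the pointer load before subsequent
  // loads through it, so a non-NULL result always sees x == 42.
  Foo* consume() {
    return (Foo*)OrderAccess::load_ptr_acquire(&_published);
  }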

// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.

// ---- After: the *_ptr overloads removed; the templated load_acquire,
// release_store and release_store_fence cover pointer types directly ----

  ~ScopedFence() { postfix(); }
  void prefix() { ScopedFenceGeneral<T>::prefix(); }
  void postfix() { ScopedFenceGeneral<T>::postfix(); }
};

class OrderAccess : private Atomic {
 public:
  // barriers
  static void loadload();
  static void storestore();
  static void loadstore();
  static void storeload();

  static void acquire();
  static void release();
  static void fence();

  template <typename T>
  static T load_acquire(const volatile T* p);

  template <typename T, typename D>
  static void release_store(volatile D* p, T v);

  template <typename T, typename D>
  static void release_store_fence(volatile D* p, T v);

 private:
  // This is a helper that invokes the StubRoutines::fence_entry()
  // routine if it exists. It should only be used by platforms that
  // don't have another way to do the inline assembly.
  static void StubRoutines_fence();

  // Give platforms a variation point to specialize.
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedStore;
  template<size_t byte_size, ScopedFenceType type> struct PlatformOrderedLoad;

  template<typename FieldType, ScopedFenceType FenceType>
  static void ordered_store(volatile FieldType* p, FieldType v);

  template<typename FieldType, ScopedFenceType FenceType>
  static FieldType ordered_load(const volatile FieldType* p);
};
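
With the *_ptr overloads gone, the templated methods deduce pointer types
directly; the same publication sketch (hypothetical names again) no longer
needs casts through intptr_t:

  struct Foo { int x; };
  static Foo* volatile _published = NULL;    // hypothetical published slot

  void publish() {
    Foo* f = new Foo();
    f->x = 42;
    OrderAccess::release_store(&_published, f);     // deduces D = T = Foo*
  }

  Foo* consume() {
    return OrderAccess::load_acquire(&_published);  // deduces T = Foo*
  }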

// The following methods can be specialized using simple template specialization
// in the platform specific files for optimization purposes. Otherwise the
// generalized variant is used.
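
A hedged sketch of what such a platform specialization could look like (the
functor shape, the RELEASE_X_FENCE enumerator name, and the use of GCC's
__atomic builtin are assumptions for illustration, not the actual platform
code):

  // Hypothetical platform file: specialize the 4-byte release-store-fence
  // case to a single sequentially consistent store rather than the
  // generalized store followed by an explicit fence.
  template<>
  struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE> {
    template <typename T>
    void operator()(T v, volatile T* p) const {
      __atomic_store_n(p, v, __ATOMIC_SEQ_CST);
    }
  };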