
src/hotspot/os_cpu/windows_x86/orderAccess_windows_x86.hpp

Old version:

  22  *
  23  */
  24 
  25 #ifndef OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP
  26 #define OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP
  27 
  28 // Included in orderAccess.hpp header file.
  29 
  30 #include <intrin.h>
  31 
  32 // Compiler version last used for testing: Microsoft Visual Studio 2010
  33 // Please update this information when this file changes
  34 
  35 // Implementation of class OrderAccess.
  36 
  37 // A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
  38 inline void compiler_barrier() {
  39   _ReadWriteBarrier();
  40 }
  41 
  42 // Note that in MSVC, volatile memory accesses are explicitly
  43 // guaranteed to have acquire/release semantics (w.r.t. compiler
  44 // reordering) and therefore do not even need a compiler barrier
  45 // for normal acquire/release accesses. All generalized
  46 // bound calls like release_store go through OrderAccess::load
  47 // and OrderAccess::store, which do volatile memory accesses.
  48 template<> inline void ScopedFence<X_ACQUIRE>::postfix()       { }
  49 template<> inline void ScopedFence<RELEASE_X>::prefix()        { }
  50 template<> inline void ScopedFence<RELEASE_X_FENCE>::prefix()  { }
  51 template<> inline void ScopedFence<RELEASE_X_FENCE>::postfix() { OrderAccess::fence(); }
  52 
  53 inline void OrderAccess::loadload()   { compiler_barrier(); }
  54 inline void OrderAccess::storestore() { compiler_barrier(); }
  55 inline void OrderAccess::loadstore()  { compiler_barrier(); }
  56 inline void OrderAccess::storeload()  { fence(); }
  57 
  58 inline void OrderAccess::acquire()    { compiler_barrier(); }
  59 inline void OrderAccess::release()    { compiler_barrier(); }
  60 
  61 inline void OrderAccess::fence() {
  62 #ifdef AMD64
  63   StubRoutines_fence();
  64 #else
  65   __asm {
  66     lock add dword ptr [esp], 0;
  67   }
  68 #endif // AMD64
  69   compiler_barrier();
  70 }
  71 
  72 inline void OrderAccess::cross_modify_fence() {
  73   int regs[4];
  74   __cpuid(regs, 0);
  75 }
  76 
  77 #ifndef AMD64
  78 template<>
  79 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
  80 {
  81   template <typename T>
  82   void operator()(T v, volatile T* p) const {
  83     __asm {
  84       mov edx, p;
  85       mov al, v;
  86       xchg al, byte ptr [edx];
  87     }
  88   }
  89 };
  90 
  91 template<>
  92 struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
  93 {
  94   template <typename T>
  95   void operator()(T v, volatile T* p) const {
  96     __asm {
  97       mov edx, p;
  98       mov ax, v;
  99       xchg ax, word ptr [edx];
 100     }
 101   }
 102 };
 103 
 104 template<>
 105 struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
 106 {
 107   template <typename T>
 108   void operator()(T v, volatile T* p) const {
 109     __asm {
 110       mov edx, p;
 111       mov eax, v;
 112       xchg eax, dword ptr [edx];
 113     }
 114   }
 115 };
 116 #endif // AMD64
 117 
 118 #endif // OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP
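
A minimal sketch (not JDK source) of how generalized calls such as release_store and release_store_fence are expected to lower onto the primitives in the listing above on Windows/x86. All my_* names are hypothetical, and _mm_mfence() merely stands in for OrderAccess::fence(), which the real code reaches through the ScopedFence<RELEASE_X_FENCE> postfix.

// Hedged sketch, not JDK code. my_* names are hypothetical.
#include <intrin.h>     // _ReadWriteBarrier
#include <emmintrin.h>  // _mm_mfence, stand-in for OrderAccess::fence()

inline void my_compiler_barrier() { _ReadWriteBarrier(); }

template <typename T>
inline void my_release_store(volatile T* p, T v) {
  my_compiler_barrier();  // release(): a compiler-only barrier suffices on x86 (TSO)
  *p = v;                 // volatile store; MSVC keeps it ordered w.r.t. other volatile accesses
}

template <typename T>
inline void my_release_store_fence(volatile T* p, T v) {
  my_release_store(p, v);
  _mm_mfence();           // RELEASE_X_FENCE postfix: full StoreLoad fence after the store
}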

New version (the change removes the MSVC volatile-semantics comment, the ScopedFence specializations, and the 32-bit PlatformOrderedStore specializations):

  22  *
  23  */
  24 
  25 #ifndef OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP
  26 #define OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP
  27 
  28 // Included in orderAccess.hpp header file.
  29 
  30 #include <intrin.h>
  31 
  32 // Compiler version last used for testing: Microsoft Visual Studio 2010
  33 // Please update this information when this file changes
  34 
  35 // Implementation of class OrderAccess.
  36 
  37 // A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
  38 inline void compiler_barrier() {
  39   _ReadWriteBarrier();
  40 }
  41 
  42 inline void OrderAccess::loadload()   { compiler_barrier(); }
  43 inline void OrderAccess::storestore() { compiler_barrier(); }
  44 inline void OrderAccess::loadstore()  { compiler_barrier(); }
  45 inline void OrderAccess::storeload()  { fence(); }
  46 
  47 inline void OrderAccess::acquire()    { compiler_barrier(); }
  48 inline void OrderAccess::release()    { compiler_barrier(); }
  49 
  50 inline void OrderAccess::fence() {
  51 #ifdef AMD64
  52   StubRoutines_fence();
  53 #else
  54   __asm {
  55     lock add dword ptr [esp], 0;
  56   }
  57 #endif // AMD64
  58   compiler_barrier();
  59 }
  60 
  61 inline void OrderAccess::cross_modify_fence() {
  62   int regs[4];
  63   __cpuid(regs, 0);
  64 }
  65 
  66 #endif // OS_CPU_WINDOWS_X86_ORDERACCESS_WINDOWS_X86_HPP
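
As a hedged illustration of why storeload() is the only barrier above that needs a real fence(): on x86 (TSO), a store followed by a load from a different location may still be satisfied from the store buffer, i.e. effectively reordered. In the classic Dekker-style fragment below (not JDK code; the names are made up), both threads can return 0 unless the fence is present; with it, at least one thread observes the other's store.

// Hedged illustration only; _mm_mfence() stands in for OrderAccess::fence().
#include <emmintrin.h>   // _mm_mfence

volatile int flag0 = 0;
volatile int flag1 = 0;

// Run on thread 0.
int observe_from_thread0() {
  flag0 = 1;
  _mm_mfence();          // storeload(): drain the store buffer before the load
  return flag1;          // without the fence, both threads may read 0
}

// Run on thread 1.
int observe_from_thread1() {
  flag1 = 1;
  _mm_mfence();
  return flag0;
}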