
src/hotspot/os_cpu/bsd_x86/orderAccess_bsd_x86.hpp

inline void OrderAccess::storeload()  { fence();            }

// On x86 (TSO), acquire and release only need to constrain the compiler;
// the hardware already provides the required load and store ordering.
inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }

inline void OrderAccess::fence() {
  // Always use a locked addl; mfence is sometimes more expensive.
#ifdef AMD64
  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  compiler_barrier();
}

// cpuid is a serializing instruction: it forces the processor to discard
// any speculatively fetched (possibly stale) instructions.
inline void OrderAccess::cross_modify_fence() {
  int idx = 0;
  __asm__ volatile ("cpuid" : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
}

// Release stores are implemented with xchg: on x86, xchg with a memory
// operand carries an implicit lock prefix, so the store is also a full fence.
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgq (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64

#endif // OS_CPU_BSD_X86_ORDERACCESS_BSD_X86_HPP
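
For context, here is a minimal sketch of how these primitives pair up in a release/acquire publication pattern. It is illustrative only and not part of the header; publish, try_consume, g_payload, and g_ready are hypothetical names.

// Illustrative sketch, assuming the OrderAccess API above.
static int          g_payload = 0;   // hypothetical data slot
static volatile int g_ready   = 0;   // hypothetical publication flag

void publish() {
  g_payload = 42;
  OrderAccess::release();   // compiler barrier; x86 TSO orders the two stores
  g_ready = 1;
}

bool try_consume(int* out) {
  if (g_ready == 0) return false;
  OrderAccess::acquire();   // compiler barrier; x86 TSO orders the two loads
  *out = g_payload;
  return true;
}

Only a store followed by a load of a different location (the Dekker pattern) needs the locked addl issued by fence()/storeload().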

