< prev index next >

src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.inline.hpp

Print this page




     // Ordering primitives for linux-x86. x86 has a strong memory model in
     // which only StoreLoad reordering is possible at the hardware level, so
     // only storeload() issues a real fence; the other orderings merely have
     // to stop the COMPILER from reordering, hence compiler_barrier().
  44 inline void OrderAccess::loadstore()  { compiler_barrier(); }
  45 inline void OrderAccess::storeload()  { fence();            }
  46 
  47 inline void OrderAccess::acquire()    { compiler_barrier(); }
  48 inline void OrderAccess::release()    { compiler_barrier(); }
  49 
     // Full two-way (StoreLoad) fence. A LOCK-prefixed add of zero to the
     // word at the top of the stack serializes memory accesses without
     // changing any data (see the comment below on why not MFENCE).
  50 inline void OrderAccess::fence() {
  51   if (os::is_MP()) {
     // Hardware fence is only needed on multiprocessor systems; on a single
     // CPU the trailing compiler barrier alone is sufficient.
  52     // always use locked addl since mfence is sometimes expensive
  53 #ifdef AMD64
  54     __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
  55 #else
  56     __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
  57 #endif
     // Clobbers: "cc" because addl writes the flags; "memory" makes the asm
     // statement a compiler barrier too, so no access is moved across it.
  58   }
  59   compiler_barrier();
  60 }
  61 
     // Ordered 1-byte store with RELEASE_X_FENCE semantics: store v to *p,
     // then fence. A single XCHG does both at once -- XCHG with a memory
     // operand is implicitly LOCKed on x86 and therefore fully serializing.
  62 template<>
  63 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
  64   VALUE_OBJ_CLASS_SPEC
  65 {
  66   template <typename T>
  67   void operator()(T v, volatile T* p) const {
     // "=q": byte operations require a byte-addressable register on ia32.
     // "0" ties input v to the same register; after the exchange it holds
     // the old *p, which is simply discarded. "memory" blocks compiler
     // reordering across the store.
  68     __asm__ volatile (  "xchgb (%2),%0"
  69                       : "=q" (v)
  70                       : "0" (v), "r" (p)
  71                       : "memory");
  72   }
  73 };
  74 
     // Ordered 2-byte store with RELEASE_X_FENCE semantics. The implicitly
     // LOCKed XCHG performs the store and acts as a full fence in one
     // instruction.
  75 template<>
  76 struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
  77   VALUE_OBJ_CLASS_SPEC
  78 {
  79   template <typename T>
  80   void operator()(T v, volatile T* p) const {
     // "=r"/"0": v is both source and (discarded) destination register;
     // "memory" is the compiler-side barrier.
  81     __asm__ volatile (  "xchgw (%2),%0"
  82                       : "=r" (v)
  83                       : "0" (v), "r" (p)
  84                       : "memory");
  85   }
  86 };
  87 
     // Ordered 4-byte store with RELEASE_X_FENCE semantics. XCHG with a
     // memory operand is implicitly LOCKed, so the store and the fence are
     // a single serializing instruction.
  88 template<>
  89 struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
  90   VALUE_OBJ_CLASS_SPEC
  91 {
  92   template <typename T>
  93   void operator()(T v, volatile T* p) const {
     // "=r"/"0": v is both source and (discarded) destination register;
     // "memory" keeps the compiler from reordering around the store.
  94     __asm__ volatile (  "xchgl (%2),%0"
  95                       : "=r" (v)
  96                       : "0" (v), "r" (p)
  97                       : "memory");
  98   }
  99 };
 100 
     // Ordered 8-byte store -- only available on 64-bit (xchgq), hence the
     // AMD64 guard; a 32-bit build has no single-instruction 8-byte xchg.
 101 #ifdef AMD64
 102 template<>
 103 struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
 104   VALUE_OBJ_CLASS_SPEC
 105 {
 106   template <typename T>
 107   void operator()(T v, volatile T* p) const {
     // Implicitly LOCKed XCHG = store + full fence in one instruction.
     // "=r"/"0": v doubles as the (discarded) destination register.
 108     __asm__ volatile (  "xchgq (%2), %0"
 109                       : "=r" (v)
 110                       : "0" (v), "r" (p)
 111                       : "memory");
 112   }
 113 };
 114 #endif // AMD64
 115 
 116 #endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP


     // x86 ordering primitives. Hardware can only reorder StoreLoad, so
     // storeload() is the lone case needing a real fence(); loadstore,
     // acquire and release need only a compiler-level barrier.
  44 inline void OrderAccess::loadstore()  { compiler_barrier(); }
  45 inline void OrderAccess::storeload()  { fence();            }
  46 
  47 inline void OrderAccess::acquire()    { compiler_barrier(); }
  48 inline void OrderAccess::release()    { compiler_barrier(); }
  49 
     // Full StoreLoad fence implemented as a LOCKed add of zero to the
     // top-of-stack word: serializing, data-preserving, and (per the comment
     // below) cheaper than MFENCE on some processors.
  50 inline void OrderAccess::fence() {
  51   if (os::is_MP()) {
     // Uniprocessor: nothing to order against in hardware, so only the
     // compiler barrier at the end is issued.
  52     // always use locked addl since mfence is sometimes expensive
  53 #ifdef AMD64
  54     __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
  55 #else
  56     __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
  57 #endif
     // "cc": addl writes the flags; "memory": also acts as a compiler
     // barrier, preventing any access from moving across the asm.
  58   }
  59   compiler_barrier();
  60 }
  61 
     // 1-byte RELEASE_X_FENCE store: XCHG with a memory operand is
     // implicitly LOCKed on x86, so one xchgb both stores v and provides
     // the full fence.
  62 template<>
  63 struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>

  64 {
  65   template <typename T>
  66   void operator()(T v, volatile T* p) const {
     // "=q": byte ops need a byte-addressable register on ia32; "0" makes
     // v both source and (discarded, receives old *p) destination.
     // "memory" stops compiler reordering.
  67     __asm__ volatile (  "xchgb (%2),%0"
  68                       : "=q" (v)
  69                       : "0" (v), "r" (p)
  70                       : "memory");
  71   }
  72 };
  73 
     // 2-byte RELEASE_X_FENCE store: the implicitly LOCKed xchgw is both
     // the store and the serializing fence.
  74 template<>
  75 struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>

  76 {
  77   template <typename T>
  78   void operator()(T v, volatile T* p) const {
     // "=r"/"0": v doubles as the (discarded) destination register;
     // "memory" is the compiler-side barrier.
  79     __asm__ volatile (  "xchgw (%2),%0"
  80                       : "=r" (v)
  81                       : "0" (v), "r" (p)
  82                       : "memory");
  83   }
  84 };
  85 
     // 4-byte RELEASE_X_FENCE store: one implicitly LOCKed xchgl performs
     // the store and the full fence together.
  86 template<>
  87 struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>

  88 {
  89   template <typename T>
  90   void operator()(T v, volatile T* p) const {
     // "=r"/"0": v is source and (discarded) destination; "memory" keeps
     // the compiler from reordering across the store.
  91     __asm__ volatile (  "xchgl (%2),%0"
  92                       : "=r" (v)
  93                       : "0" (v), "r" (p)
  94                       : "memory");
  95   }
  96 };
  97 
     // 8-byte RELEASE_X_FENCE store -- 64-bit only (xchgq), hence the AMD64
     // guard; 32-bit x86 has no single-instruction 8-byte exchange.
  98 #ifdef AMD64
  99 template<>
 100 struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>

 101 {
 102   template <typename T>
 103   void operator()(T v, volatile T* p) const {
     // Implicitly LOCKed XCHG = store + full fence in one instruction;
     // "0" ties v to the output register, whose old-*p value is discarded.
 104     __asm__ volatile (  "xchgq (%2), %0"
 105                       : "=r" (v)
 106                       : "0" (v), "r" (p)
 107                       : "memory");
 108   }
 109 };
 110 #endif // AMD64
 111 
 112 #endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
< prev index next >