
src/hotspot/os_cpu/linux_x86/orderAccess_linux_x86.hpp

inline void OrderAccess::fence() {
  // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  compiler_barrier();
}
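// Illustrative sketch (hypothetical helper, not part of this header): the
// portable C++11 spelling of the full fence above. On x86, compilers lower
// a seq_cst thread fence to MFENCE or to a locked read-modify-write; the
// hand-written "lock; addl $0,0(%rsp)" above chooses the locked RMW form
// directly because MFENCE is sometimes the more expensive of the two.
#include <atomic>

inline void full_fence_portable() {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // full two-way barrier
}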

inline void OrderAccess::cross_modify_fence() {
  int idx = 0;
#ifdef AMD64
  __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
#else
  // On 32-bit PIC builds EBX is a reserved register (it holds the GOT
  // pointer) that cannot be listed as clobbered, so we must protect it
  // around the CPUID.
  __asm__ volatile ("xchg %%esi, %%ebx; cpuid; xchg %%esi, %%ebx " : "+a" (idx) : : "esi", "ecx", "edx", "memory");
#endif
}
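// Illustrative sketch (hypothetical helper, not part of this header): the
// same serializing effect via GCC/Clang's <cpuid.h>. CPUID is a serializing
// instruction, so executing it after patching code guarantees that any
// already-fetched or speculatively decoded instruction bytes are discarded
// before execution continues. The <cpuid.h> helpers deal with the 32-bit
// PIC/EBX restriction themselves, so no manual xchg dance is needed here.
#include <cpuid.h>

inline void cross_modify_fence_sketch() {
  unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
  __asm__ volatile ("" : : : "memory");    // order the code patch before CPUID
  __get_cpuid(0, &eax, &ebx, &ecx, &edx);  // leaf 0; results ignored, only the serialization matters
  __asm__ volatile ("" : : : "memory");    // and later accesses after it
}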

template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgq (%2), %0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64

#endif // OS_CPU_LINUX_X86_ORDERACCESS_LINUX_X86_HPP
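The xchg-based specializations above implement release_store_fence: an exchange with a memory operand is implicitly LOCK-prefixed, so a single instruction both publishes the value and supplies the trailing full fence. Below is a minimal sketch of the same effect using GCC/Clang's atomic builtins; the helper name is hypothetical, and T is assumed to be an integral or pointer type of size 1, 2, 4 or 8, as the specializations require. Current compilers usually emit the seq_cst store as exactly this kind of xchg on x86.

// Hypothetical sketch, not part of this header.
template <typename T>
inline void release_store_fence_sketch(volatile T* p, T v) {
  // A sequentially consistent store has the same "store, then full fence"
  // effect as the xchg specializations above.
  __atomic_store_n(p, v, __ATOMIC_SEQ_CST);
}

// Example use: publish a flag so that all earlier stores become visible
// before anything this thread does afterwards.
//   volatile int ready = 0;
//   release_store_fence_sketch(&ready, 1);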

