
src/os_cpu/linux_x86/vm/orderAccess_linux_x86.inline.hpp

Old version:

 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

// Implementation of class OrderAccess.

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}
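
// On x86 the hardware memory model is TSO (total store order): loads are not
// reordered with loads, stores are not reordered with stores, and loads are
// not reordered with earlier stores. Only an older store followed by a
// younger load can be observed out of order, so storeload() is the only
// barrier that requires a real fence instruction.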

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

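// acquire() performs a dummy volatile load from the top of the stack. On x86
// an ordinary load already has acquire semantics, so the load itself is
// enough; the "memory" clobber stops the compiler from reordering across it.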
inline void OrderAccess::acquire() {
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}

inline void OrderAccess::release() {
  compiler_barrier();
}

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}
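// A locked addl of zero to the word at the top of the stack is a full fence:
// any LOCK-prefixed instruction drains the store buffer before later loads
// execute, and the stack slot is very likely already in L1, which makes this
// cheaper than mfence on many processors.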

inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { jbyte   v = *p; compiler_barrier(); return v; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { jshort  v = *p; compiler_barrier(); return v; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { jint    v = *p; compiler_barrier(); return v; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { jlong   v = Atomic::load(p); compiler_barrier(); return v; }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { jubyte  v = *p; compiler_barrier(); return v; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { jushort v = *p; compiler_barrier(); return v; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { juint   v = *p; compiler_barrier(); return v; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { julong  v = Atomic::load((volatile jlong*)p); compiler_barrier(); return v; }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { jfloat  v = *p; compiler_barrier(); return v; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { jdouble v = jdouble_cast(Atomic::load((volatile jlong*)p)); compiler_barrier(); return v; }

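// A plain load followed by a compiler barrier is a correct load-acquire on
// x86. The 64-bit variants go through Atomic::load so the read stays atomic
// on 32-bit, where a plain 64-bit load could tear.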
inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { intptr_t v = *p; compiler_barrier(); return v; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { void*    v = *(void* volatile *)p; compiler_barrier(); return v; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { void*    v = *(void* const volatile *)p; compiler_barrier(); return v; }

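// Illustrative use (the field _published and type Foo are hypothetical): a
// writer constructs an object and publishes it with release_store_ptr; a
// reader picks it up with load_ptr_acquire and is then guaranteed to see
// the writer's construction stores.
//
//   writer:  OrderAccess::release_store_ptr(&_published, obj);
//   reader:  Foo* f = (Foo*)OrderAccess::load_ptr_acquire(&_published);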
inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { compiler_barrier(); *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { compiler_barrier(); *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { compiler_barrier(); *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { compiler_barrier(); Atomic::store(v, p); }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { compiler_barrier(); *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { compiler_barrier(); *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { compiler_barrier(); *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { compiler_barrier(); Atomic::store((jlong)v, (volatile jlong*)p); }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { compiler_barrier(); *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { compiler_barrier(); *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { compiler_barrier(); *(void* volatile *)p = v; }
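// Mirroring load_acquire: on x86 an ordinary store already has release
// semantics, so a compiler barrier followed by the store suffices.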
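// store_fence combines the store and the fence in a single xchg instruction:
// xchg with a memory operand carries an implicit LOCK prefix, making it a
// serializing store. The old value read back into v is simply discarded.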
inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::store_fence(jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

// AMD64 used to copy the bodies of the signed versions; 32-bit delegated to
// them instead. As long as the compiler does the inlining, delegating is simpler.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { store_fence((jlong*)p, jlong_cast(v)); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store(p, v); fence();
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }

inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}

#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP


New version:

 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

// Implementation of class OrderAccess.

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence();            }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }
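
// Because x86 is TSO, every barrier except storeload() reduces to a compiler
// barrier: the hardware already preserves load-load, store-store and
// load-store order. acquire() and release() no longer need the old dummy
// stack load.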

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
  compiler_barrier();
}
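// The trailing compiler_barrier() keeps fence() a compiler barrier even on
// uniprocessor builds, where the locked addl is skipped.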

template<>
inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
template<>
inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
template<>
inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

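// Only release_store_fence needs platform-specific code here: xchg both
// stores the value and acts as a full fence in one instruction. Everything
// else falls through to the generalized shared implementation.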
#ifdef AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong*  p, jlong  v) {
  __asm__ volatile (  "xchgq (%2), %0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
#endif // AMD64

template<>
inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat*  p, jfloat  v) {
  release_store_fence((volatile jint*)p, jint_cast(v));
}
template<>
inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
  release_store_fence((volatile jlong*)p, jlong_cast(v));
}
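
// Signals to the shared OrderAccess code that this platform supplies only the
// specializations above and relies on the generalized implementations for
// everything else.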
#define VM_HAS_GENERALIZED_ORDER_ACCESS 1
#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP