/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "vm_version_x86.hpp"

// Implementation of class OrderAccess.

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

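// On x86 the hardware memory model is TSO (total store order): loads are
// not reordered with older loads, stores are not reordered with older
// stores, and the only hardware reordering is of a store with a younger
// load to a different location. Hence loadload(), loadstore() and
// storestore() only have to constrain the compiler, while storeload()
// must reach the hardware via fence().
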
inline void OrderAccess::acquire() {
  // A dummy volatile load from the stack with a "memory" clobber acts as a
  // compiler barrier; TSO already provides the load ordering that acquire
  // requires, so no hardware fence is needed.
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}

inline void OrderAccess::release() {
  // A volatile store is a compiler barrier, and TSO keeps stores in order,
  // so a dummy store suffices. It goes to a stack local to avoid hitting
  // the same cache-line from different threads.
  volatile jint local_dummy = 0;
}

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // Always use a locked addl on the stack rather than mfence; mfence is
    // sometimes more expensive.
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}
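
// The locked addl adds zero to the top of the stack; it is the lock prefix
// that drains the store buffer and gives mfence-like two-way ordering. A
// minimal sketch of why this barrier must reach the hardware (the fields
// _a, _b, r1 and r2 are hypothetical, for illustration only):
//
//   // Thread 1:                  // Thread 2:
//   _a = 1;                       _b = 1;
//   OrderAccess::fence();         OrderAccess::fence();
//   r1 = _b;                      r2 = _a;
//
// Without the fences each store may sit in its thread's store buffer while
// the following load executes, permitting r1 == 0 && r2 == 0.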

// Note: on 32-bit x86 plain 64-bit loads and stores are not atomic, so the
// jlong/julong variants go through Atomic::load and Atomic::store.
inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return (julong)Atomic::load((volatile jlong*)p); }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }

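// A sketch of the intended pairing of load_acquire and release_store
// (hypothetical fields `_payload` and `_ready`, illustration only):
//
//   jint          _payload;
//   volatile jint _ready;
//
//   // Publisher:
//   _payload = 42;                           // plain store first
//   OrderAccess::release_store(&_ready, 1);  // publish the flag last
//
//   // Consumer:
//   if (OrderAccess::load_acquire(&_ready) != 0) {
//     assert(_payload == 42, "must observe the publisher's store");
//   }
//
// On x86 both calls compile to plain moves; they exist to keep the C++
// compiler from reordering the accesses.
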
inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
  // "q" rather than "r": xchgb needs a byte-addressable register, which on
  // 32-bit x86 only the a-d registers provide.
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::store_fence(jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  // A plain 64-bit store is not atomic on 32-bit x86; go through Atomic::store.
  Atomic::store(v, (volatile jlong*)p); fence();
#endif // AMD64
}

// The AMD64 port copied the bodies from the signed versions; the 32-bit port
// did the same. As long as the compiler inlines these forwarders, this is
// just as efficient and simpler to maintain.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}
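
// Note on the store_fence family above: an xchg with a memory operand is
// implicitly locked on x86, so the single instruction both performs the
// store and acts as a full fence. A sketch of the equivalence (hypothetical
// jint field `_flag`, illustration only):
//
//   OrderAccess::store_fence(&_flag, 1);
//   // has the same ordering effect as, but is cheaper than:
//   _flag = 1;
//   OrderAccess::fence();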

// We must duplicate these definitions rather than call store_fence because
// we do not want to cast away volatile.
inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store(p, v); fence();
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }

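// There is no xchg form that stores a floating-point register directly, so
// the jfloat/jdouble overloads below fall back to a plain store followed by
// fence(), which gives the same ordering at the cost of an extra instruction.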
inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}

#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP