/*
 * Copyright (c) 2003, 2011, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"      // for os::is_MP()
#include "vm_version_x86.hpp"

// Implementation of class OrderAccess.
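//
// x86 enforces total store order (TSO): a load is never reordered with
// an earlier load, and a store is never reordered with an earlier load
// or store; only a store followed by a later load may be reordered.
// Hence loadload(), loadstore() and storestore() need only a compiler
// barrier (acquire()/release()), while storeload() needs a real fence().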

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire() {
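  // A dummy volatile load from the stack top, combined with the
  // "memory" clobber, acts as a compiler barrier; no hardware fence is
  // needed because x86 loads already have acquire semantics.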
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}

inline void OrderAccess::release() {
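  // x86 stores already have release semantics, so a compiler barrier
  // is enough; the volatile store below keeps the compiler from moving
  // memory accesses past it.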
  // The dummy is local to avoid hitting the same cache-line from
  // different threads.
  volatile jint local_dummy = 0;
}

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
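    // The LOCK prefix turns this no-op add on the top of the stack
    // into a full two-way barrier.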
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}

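// Plain x86 loads already provide acquire semantics.  The 64-bit
// variants go through Atomic::load so the access remains atomic on
// 32-bit platforms.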
inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

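// Likewise, plain x86 stores already provide release semantics; the
// 64-bit variants use Atomic::store for atomicity on 32-bit platforms.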
inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }
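
// Illustrative usage sketch (not part of this header; `data' and `flag'
// are hypothetical variables): a writer publishes data and then sets a
// flag with release semantics; a reader that observes the flag with
// acquire semantics is guaranteed to see the data.
//
//   // writer
//   data = 42;
//   OrderAccess::release_store(&flag, 1);
//
//   // reader
//   if (OrderAccess::load_acquire(&flag) != 0) {
//     assert(data == 42, "must see writer's store");
//   }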
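// store_fence stores a value and then acts as a full fence.  An xchg
// with a memory operand implies the LOCK prefix, so a single
// instruction provides both the store and the barrier.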
inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::store_fence(jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
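  // On 32-bit this is a plain (not necessarily atomic) 64-bit store
  // followed by a full fence.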
  *p = v; fence();
#endif // AMD64
}

// The unsigned versions simply delegate to the signed versions; AMD64
// copied this scheme from the 32-bit port.  As long as the compiler
// does the inlining, this is simpler.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
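  // On 32-bit an atomic 64-bit store (Atomic::store via release_store)
  // is followed by a full fence.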
  release_store(p, v); fence();
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }

inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}

#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP