/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"
#include "vm_version_x86.hpp"

// Implementation of class OrderAccess.
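//
// x86 implements a relatively strong memory model (TSO): loads are not
// reordered with other loads, stores are not reordered with other stores,
// and stores are not reordered with older loads. The only hardware
// reordering is of a load with an older store to a different location.
// Hence loadload(), storestore() and loadstore() below only need to stop
// the compiler from reordering (acquire()/release()), while storeload()
// requires a real fence().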

inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

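// A dummy volatile load from the top of the stack. The "memory" clobber
// keeps the compiler from moving memory accesses across it; the hardware
// already gives ordinary x86 loads acquire semantics.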
inline void OrderAccess::acquire() {
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}

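// A dummy volatile store to a stack local. The volatile store is relied on
// as a compiler barrier; no hardware barrier is needed since x86 stores
// are not reordered with older loads or stores. Each thread writes its own
// stack location, hence the cache-line remark below.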
inline void OrderAccess::release() {
  // Avoid hitting the same cache-line from
  // different threads.
  volatile jint local_dummy = 0;
}

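// storeload is the only ordering x86 can violate, so fence() is the one
// primitive that must emit a real barrier instruction, and only on MP
// systems.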
inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}

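// An ordinary x86 load already has acquire semantics, so load_acquire is a
// plain volatile load. The 64-bit overloads go through Atomic::load so the
// access stays atomic on 32-bit platforms.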
inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return Atomic::load(p); }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return Atomic::load((volatile jlong*)p); }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return jdouble_cast(Atomic::load((volatile jlong*)p)); }
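// Overloads on const-qualified pointers, added for JFR support; the
// asserts check that they are only reached when EnableJFR is set.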
inline bool     OrderAccess::load_acquire(const volatile bool*  p) {
  assert(EnableJFR, "sanity check");
  return *p;
}
inline julong   OrderAccess::load_acquire(const volatile julong*  p) {
  assert(EnableJFR, "sanity check");
  return Atomic::load((volatile jlong*)p);
}

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline uintptr_t OrderAccess::load_ptr_acquire(const volatile uintptr_t* p) {
  assert(EnableJFR, "sanity check");
  return *p;
}

inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

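// An ordinary x86 store already has release semantics, so release_store is
// a plain volatile store; the 64-bit overloads go through Atomic::store
// for 32-bit atomicity.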
inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { Atomic::store(v, p); }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { Atomic::store((jlong)v, (volatile jlong*)p); }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { release_store((volatile jlong *)p, jlong_cast(v)); }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }

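// store_fence fuses the store and the trailing fence into one instruction:
// xchg with a memory operand carries an implicit lock prefix, so it is a
// store plus a full barrier. The old value swapped back into v is
// discarded.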
inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::store_fence(jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

// AMD64 copied the bodies for the signed versions. 32bit did this. As long as the
// compiler does the inlining this is simpler.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { store_fence((jlong*)p, jlong_cast(v)); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store(p, v); fence();
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }

inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { release_store_fence((volatile jlong*)p, jlong_cast(v)); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
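
// Illustrative use (hypothetical fields, not part of this file): a writer
// publishes data with release_store and a reader picks it up with
// load_acquire; the pairing guarantees that a reader seeing _ready != 0
// also sees the data written before the flag.
//
//   // writer:                                  // reader:
//   _data = 42;                                 while (OrderAccess::load_acquire(&_ready) == 0) ;
//   OrderAccess::release_store(&_ready, 1);     assert(_data == 42, "must see published value");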

#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP