/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

// Implementation of class OrderAccess.

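// x86 presents a strongly ordered memory model for ordinary cacheable
// accesses: the only reordering the hardware performs is allowing a store
// to be delayed past a subsequent load (StoreLoad). loadload(), storestore()
// and loadstore() therefore only have to keep the compiler in line, while
// storeload() needs a real fence.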
inline void OrderAccess::loadload()   { acquire(); }
inline void OrderAccess::storestore() { release(); }
inline void OrderAccess::loadstore()  { acquire(); }
inline void OrderAccess::storeload()  { fence(); }

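// acquire() only has to restrain the compiler: ordinary loads already have
// acquire semantics on x86. The dummy volatile load from the stack, together
// with the "memory" clobber, keeps the compiler from floating later memory
// accesses above this point.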
inline void OrderAccess::acquire() {
  volatile intptr_t local_dummy;
#ifdef AMD64
  __asm__ volatile ("movq 0(%%rsp), %0" : "=r" (local_dummy) : : "memory");
#else
  __asm__ volatile ("movl 0(%%esp),%0" : "=r" (local_dummy) : : "memory");
#endif // AMD64
}

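// release() likewise only needs to restrain the compiler: ordinary stores
// already have release semantics on x86, so the volatile store to a local
// below serves as the compiler-level barrier.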
inline void OrderAccess::release() {
  // Avoid hitting the same cache-line from
  // different threads.
  volatile jint local_dummy = 0;
}

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
}

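// On x86 a load is not reordered with later loads, and stores are not
// reordered with earlier loads, so a plain load already has acquire
// semantics: load_acquire reduces to a volatile load.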
inline jbyte    OrderAccess::load_acquire(volatile jbyte*   p) { return *p; }
inline jshort   OrderAccess::load_acquire(volatile jshort*  p) { return *p; }
inline jint     OrderAccess::load_acquire(volatile jint*    p) { return *p; }
inline jlong    OrderAccess::load_acquire(volatile jlong*   p) { return *p; }
inline jubyte   OrderAccess::load_acquire(volatile jubyte*  p) { return *p; }
inline jushort  OrderAccess::load_acquire(volatile jushort* p) { return *p; }
inline juint    OrderAccess::load_acquire(volatile juint*   p) { return *p; }
inline julong   OrderAccess::load_acquire(volatile julong*  p) { return *p; }
inline jfloat   OrderAccess::load_acquire(volatile jfloat*  p) { return *p; }
inline jdouble  OrderAccess::load_acquire(volatile jdouble* p) { return *p; }

inline intptr_t OrderAccess::load_ptr_acquire(volatile intptr_t*   p) { return *p; }
inline void*    OrderAccess::load_ptr_acquire(volatile void*       p) { return *(void* volatile *)p; }
inline void*    OrderAccess::load_ptr_acquire(const volatile void* p) { return *(void* const volatile *)p; }

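// Symmetrically, stores are not reordered with earlier loads or stores on
// x86, so a plain store already has release semantics: release_store is just
// a volatile store.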
inline void     OrderAccess::release_store(volatile jbyte*   p, jbyte   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jshort*  p, jshort  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jint*    p, jint    v) { *p = v; }
inline void     OrderAccess::release_store(volatile jlong*   p, jlong   v) { *p = v; }
inline void     OrderAccess::release_store(volatile jubyte*  p, jubyte  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jushort* p, jushort v) { *p = v; }
inline void     OrderAccess::release_store(volatile juint*   p, juint   v) { *p = v; }
inline void     OrderAccess::release_store(volatile julong*  p, julong  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jfloat*  p, jfloat  v) { *p = v; }
inline void     OrderAccess::release_store(volatile jdouble* p, jdouble v) { *p = v; }

inline void     OrderAccess::release_store_ptr(volatile intptr_t* p, intptr_t v) { *p = v; }
inline void     OrderAccess::release_store_ptr(volatile void*     p, void*    v) { *(void* volatile *)p = v; }

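// store_fence is implemented with xchg: an xchg with a memory operand
// implicitly asserts the LOCK signal, so the store and the full fence are a
// single instruction. The old memory value returned in v is simply discarded.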
inline void     OrderAccess::store_fence(jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::store_fence(jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::store_fence(jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

// On AMD64 these used to duplicate the bodies of the signed versions; the
// 32-bit port delegated instead. As long as the compiler inlines the calls,
// delegating is simpler.
inline void     OrderAccess::store_fence(jubyte*  p, jubyte  v) { store_fence((jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::store_fence(jushort* p, jushort v) { store_fence((jshort*)p, (jshort)v); }
inline void     OrderAccess::store_fence(juint*   p, juint   v) { store_fence((jint*)p,   (jint)v);   }
inline void     OrderAccess::store_fence(julong*  p, julong  v) { store_fence((jlong*)p,  (jlong)v);  }
inline void     OrderAccess::store_fence(jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::store_fence(jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::store_ptr_fence(intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

inline void     OrderAccess::store_ptr_fence(void**    p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ ("xchgq (%2), %0"
                        : "=r" (v)
                        : "0" (v), "r" (p)
                        : "memory");
#else
  store_fence((jint*)p, (jint)v);
#endif // AMD64
}

// Must duplicate definitions instead of calling store_fence because we don't want to cast away volatile.
inline void     OrderAccess::release_store_fence(volatile jbyte*  p, jbyte  v) {
  __asm__ volatile (  "xchgb (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jshort* p, jshort v) {
  __asm__ volatile (  "xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
inline void     OrderAccess::release_store_fence(volatile jint*   p, jint   v) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

inline void     OrderAccess::release_store_fence(volatile jlong*   p, jlong   v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  *p = v; fence();
#endif // AMD64
}

inline void     OrderAccess::release_store_fence(volatile jubyte*  p, jubyte  v) { release_store_fence((volatile jbyte*)p,  (jbyte)v);  }
inline void     OrderAccess::release_store_fence(volatile jushort* p, jushort v) { release_store_fence((volatile jshort*)p, (jshort)v); }
inline void     OrderAccess::release_store_fence(volatile juint*   p, juint   v) { release_store_fence((volatile jint*)p,   (jint)v);   }
inline void     OrderAccess::release_store_fence(volatile julong*  p, julong  v) { release_store_fence((volatile jlong*)p,  (jlong)v);  }

inline void     OrderAccess::release_store_fence(volatile jfloat*  p, jfloat  v) { *p = v; fence(); }
inline void     OrderAccess::release_store_fence(volatile jdouble* p, jdouble v) { *p = v; fence(); }

inline void     OrderAccess::release_store_ptr_fence(volatile intptr_t* p, intptr_t v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}
inline void     OrderAccess::release_store_ptr_fence(volatile void*     p, void*    v) {
#ifdef AMD64
  __asm__ __volatile__ (  "xchgq (%2), %0"
                          : "=r" (v)
                          : "0" (v), "r" (p)
                          : "memory");
#else
  release_store_fence((volatile jint*)p, (jint)v);
#endif // AMD64
}