/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_ORDERACCESS_BSD_X86_HPP
#define OS_CPU_BSD_X86_ORDERACCESS_BSD_X86_HPP

// Included in orderAccess.hpp header file.

// Compiler version last used for testing: clang 5.1
// Please update this information when this file changes

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

// x86 is TSO and hence only needs a fence for storeload
// However, a compiler barrier is still needed to prevent reordering
// between volatile and non-volatile memory accesses.
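//
// Illustrative sketch only (not part of this interface): under TSO the one
// reordering the hardware still allows is a store followed by a load of a
// different location, e.g.
//
//   Thread 1:                      Thread 2:
//     x = 1;                         y = 1;
//     OrderAccess::storeload();      OrderAccess::storeload();
//     r1 = y;                        r2 = x;
//
// Without the storeload fence both r1 and r2 could observe 0; loadload,
// storestore and loadstore orderings are already provided by the hardware
// and only need a compiler barrier.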

// Implementation of class OrderAccess.

inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence();            }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }

inline void OrderAccess::fence() {
  // always use locked addl since mfence is sometimes expensive
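  // A locked read-modify-write of the word at the top of the stack acts as a
  // full barrier on x86, and that stack slot is almost always in the local
  // cache, so this is typically cheaper than mfence.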
#ifdef AMD64
  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  compiler_barrier();
}

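// cpuid is a fully serializing instruction: executing it forces the processor
// to discard any prefetched or speculatively decoded instructions, which is
// what is required after code has been cross-modified.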
inline void OrderAccess::cross_modify_fence() {
  int idx = 0;
  __asm__ volatile ("cpuid " : "+a" (idx) : : "ebx", "ecx", "edx", "memory");
}

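// The specializations below use xchg with a memory operand, which on x86
// implies the LOCK prefix: the store therefore has release semantics and acts
// as a full fence in a single instruction. Note that the one-byte variant
// uses the "q" constraint so the value is placed in a register that has an
// 8-bit subregister on 32-bit builds.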
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgq (%2), %0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64

#endif // OS_CPU_BSD_X86_ORDERACCESS_BSD_X86_HPP