/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"

// Compiler version last used for testing: gcc 4.8.2
// Please update this information when this file changes

// Implementation of class OrderAccess.

// A compiler barrier, forcing the C++ compiler to invalidate all memory
// assumptions cached in registers across this point.
static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

// x86 is TSO (total store order): the hardware never reorders load/load,
// store/store or load/store, so those barriers, as well as acquire and
// release, only need to restrain the compiler. Store/load reordering is
// the one reordering the hardware can perform (via the store buffer), so
// storeload() is the only barrier that must emit a real fence instruction.
inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence(); }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }

inline void OrderAccess::fence() {
  // Always use a locked addl to the top of the stack rather than mfence,
  // since mfence is sometimes more expensive.
#ifdef AMD64
  __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
  __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  compiler_barrier();
}

// release_store_fence specializations: xchg with a memory operand carries
// an implicit lock prefix, so a single instruction performs both the store
// and the full fence that must follow it. The "memory" clobber doubles as
// the compiler barrier.
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgq (%2), %0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64

#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_HPP
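
// Usage sketch (illustrative only, not part of the original header). The
// names _ready, _data, publish and consume below are invented for the
// example; only the OrderAccess calls are real HotSpot API. On x86,
// release_store_fence maps to one of the xchg specializations above, while
// load_acquire is a plain load followed by a compiler barrier:
//
//   volatile jint _ready = 0;
//   int _data = 0;
//
//   void publish() {
//     _data = 42;                                   // plain store
//     OrderAccess::release_store_fence(&_ready, 1); // xchg: store + fence
//   }
//
//   void consume() {
//     if (OrderAccess::load_acquire(&_ready) != 0) {
//       int d = _data;                              // guaranteed to see 42
//     }
//   }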