/*
 * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
#define OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP

#include "runtime/atomic.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

// Compiler version last used for testing: gcc 4.8.2
// Please update this information when this file changes

// Implementation of class OrderAccess.

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

// x86 is TSO: the hardware only reorders loads ahead of older stores, so every
// barrier except storeload() needs nothing more than a compiler barrier.
inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence();            }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
  compiler_barrier();
}

// Store-release followed by a fence: xchg with a memory operand is implicitly
// locked on x86 and therefore acts as a full barrier, so a single xchg covers
// both the store and the trailing fence.
template<>
struct OrderAccess::PlatformOrderedStore<1, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgb (%2),%0"
                      : "=q" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<2, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgw (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

template<>
struct OrderAccess::PlatformOrderedStore<4, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};

#ifdef AMD64
template<>
struct OrderAccess::PlatformOrderedStore<8, RELEASE_X_FENCE>
{
  template <typename T>
  void operator()(T v, volatile T* p) const {
    __asm__ volatile (  "xchgq (%2), %0"
                      : "=r" (v)
                      : "0" (v), "r" (p)
                      : "memory");
  }
};
#endif // AMD64

#endif // OS_CPU_LINUX_X86_VM_ORDERACCESS_LINUX_X86_INLINE_HPP
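
// Usage sketch (illustrative only): how the acquire/release primitives above are
// typically paired. The names g_payload, g_ready, publish() and consume() are
// hypothetical examples, not part of this header or of the OrderAccess API.
//
//   volatile int g_payload = 0;
//   volatile int g_ready   = 0;
//
//   void publish() {
//     g_payload = 42;
//     OrderAccess::release();   // compiler barrier: payload store stays above flag store
//     g_ready = 1;              // x86 TSO keeps the two stores in order
//   }
//
//   bool consume(int* out) {
//     if (g_ready == 0) return false;
//     OrderAccess::acquire();   // compiler barrier: payload load stays below flag load
//     *out = g_payload;
//     return true;
//   }
//
// Only storeload()/fence() requires a real instruction here (the locked addl
// above), e.g. for Dekker-style "store my flag, then load the other thread's
// flag" synchronization.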