/*
 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP
#define OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP

#include "runtime/atomic.inline.hpp"
#include "runtime/orderAccess.hpp"
#include "runtime/os.hpp"

// A compiler barrier, forcing the C++ compiler to invalidate all memory assumptions
static inline void compiler_barrier() {
  __asm__ volatile ("" : : : "memory");
}

// x86 is TSO and hence only needs a fence for storeload.
// However, a compiler barrier is still needed to prevent reordering
// between volatile and non-volatile memory accesses.

// Implementation of class OrderAccess.

inline void OrderAccess::loadload()   { compiler_barrier(); }
inline void OrderAccess::storestore() { compiler_barrier(); }
inline void OrderAccess::loadstore()  { compiler_barrier(); }
inline void OrderAccess::storeload()  { fence();            }

inline void OrderAccess::acquire()    { compiler_barrier(); }
inline void OrderAccess::release()    { compiler_barrier(); }

inline void OrderAccess::fence() {
  if (os::is_MP()) {
    // always use locked addl since mfence is sometimes expensive
#ifdef AMD64
    __asm__ volatile ("lock; addl $0,0(%%rsp)" : : : "cc", "memory");
#else
    __asm__ volatile ("lock; addl $0,0(%%esp)" : : : "cc", "memory");
#endif
  }
  compiler_barrier();
}

// release_store_fence is implemented with xchg: the implicit lock semantics
// of an xchg with a memory operand make the store and the full fence a
// single serializing instruction.
template<>
inline void OrderAccess::specialized_release_store_fence<jbyte> (volatile jbyte*  p, jbyte  v) {
  __asm__ volatile ("xchgb (%2),%0"
                    : "=q" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
template<>
inline void OrderAccess::specialized_release_store_fence<jshort>(volatile jshort* p, jshort v) {
  __asm__ volatile ("xchgw (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
template<>
inline void OrderAccess::specialized_release_store_fence<jint>  (volatile jint*   p, jint   v) {
  __asm__ volatile ("xchgl (%2),%0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}

#ifdef AMD64
template<>
inline void OrderAccess::specialized_release_store_fence<jlong> (volatile jlong*  p, jlong  v) {
  __asm__ volatile ("xchgq (%2), %0"
                    : "=r" (v)
                    : "0" (v), "r" (p)
                    : "memory");
}
#endif // AMD64

// Floating-point variants delegate to the integer variants via bit casts.
template<>
inline void OrderAccess::specialized_release_store_fence<jfloat> (volatile jfloat*  p, jfloat  v) {
  release_store_fence((volatile jint*)p, jint_cast(v));
}
template<>
inline void OrderAccess::specialized_release_store_fence<jdouble>(volatile jdouble* p, jdouble v) {
  release_store_fence((volatile jlong*)p, jlong_cast(v));
}

#define VM_HAS_GENERALIZED_ORDER_ACCESS 1

#endif // OS_CPU_BSD_X86_VM_ORDERACCESS_BSD_X86_INLINE_HPP