/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
#define OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP

#include "runtime/os.hpp"

// Implementation of class atomic

// "lock xadd" leaves the old value of *dest in the source register, so the
// new value is reconstructed by adding add_value back in.
template <>
inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) {
  int32_t addend = add_value;
  __asm__ volatile (  "lock xaddl %0,(%2)"
                    : "=r" (addend)
                    : "0" (addend), "r" (dest)
                    : "cc", "memory");
  return addend + add_value;
}

template <>
inline void Atomic::specialized_inc(volatile int32_t* dest) {
  __asm__ volatile (  "lock addl $1,(%0)"
                    :
                    : "r" (dest)
                    : "cc", "memory");
}

template <>
inline void Atomic::specialized_dec(volatile int32_t* dest) {
  __asm__ volatile (  "lock subl $1,(%0)"
                    :
                    : "r" (dest)
                    : "cc", "memory");
}

// "xchg" with a memory operand is implicitly locked, so no lock prefix is
// needed.
template <>
inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) {
  __asm__ volatile (  "xchgl (%2),%0"
                    : "=r" (exchange_value)
                    : "0" (exchange_value), "r" (dest)
                    : "memory");
  return exchange_value;
}

// Tell shared code that a native byte-sized cmpxchg exists, so it need not
// be emulated with a wider one.
#define VM_HAS_SPECIALIZED_CMPXCHG_BYTE

// cmpxchg takes compare_value in eax and leaves the old value of *dest in
// eax; a lock-prefixed cmpxchg is sequentially consistent on x86, so the
// requested memory order is ignored.
template <>
inline int8_t Atomic::specialized_cmpxchg(int8_t exchange_value, volatile int8_t* dest, int8_t compare_value, cmpxchg_memory_order order) {
  __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                    : "=a" (exchange_value)
                    : "q" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

template <>
inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                    : "=a" (exchange_value)
                    : "r" (exchange_value), "a" (compare_value), "r" (dest)
                    : "cc", "memory");
  return exchange_value;
}

#ifdef AMD64

// 64-bit variants of the operations above, using the q-suffixed forms of
// the same instructions.
template <>
inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) {
  int64_t addend = add_value;
  __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                        : "=r" (addend)
                        : "0" (addend), "r" (dest)
                        : "cc", "memory");
  return addend + add_value;
}

template <>
inline void Atomic::specialized_inc(volatile int64_t* dest) {
  __asm__ __volatile__ (  "lock addq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

template <>
inline void Atomic::specialized_dec(volatile int64_t* dest) {
  __asm__ __volatile__ (  "lock subq $1,(%0)"
                        :
                        : "r" (dest)
                        : "cc", "memory");
}

template <>
inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) {
  __asm__ __volatile__ (  "xchgq (%2),%0"
                        : "=r" (exchange_value)
                        : "0" (exchange_value), "r" (dest)
                        : "memory");
  return exchange_value;
}

template <>
inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                        : "=a" (exchange_value)
                        : "r" (exchange_value), "a" (compare_value), "r" (dest)
                        : "cc", "memory");
  return exchange_value;
}

#else // !AMD64

extern "C" {
  // defined in bsd_x86.s
  int64_t _Atomic_cmpxchg_long(int64_t, volatile int64_t*, int64_t);
  void _Atomic_move_long(const volatile int64_t* src, volatile int64_t* dst);
}

// On 32-bit x86, plain 64-bit loads and stores are not atomic, so they are
// delegated to the assembly helpers above, which perform atomic 64-bit
// moves.
template <>
inline void Atomic::specialized_store(int64_t store_value, volatile int64_t* dest) {
  _Atomic_move_long(&store_value, dest);
}

template <>
inline int64_t Atomic::specialized_load(const volatile int64_t* src) {
  volatile int64_t dest;
  _Atomic_move_long(src, &dest);
  return dest;
}

template <>
inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  return _Atomic_cmpxchg_long(exchange_value, dest, compare_value);
}

#endif // AMD64

#endif // OS_CPU_BSD_X86_VM_ATOMIC_BSD_X86_HPP
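
// A minimal usage sketch (illustrative only, not part of this header):
// callers do not invoke the specialized_* routines directly; they go
// through the generic Atomic front end in runtime/atomic.hpp, which
// dispatches to the specializations above. Front-end signatures vary
// across HotSpot revisions, so the calls below are assumptions about that
// API rather than a definitive reference:
//
//   volatile int32_t counter = 0;
//   int32_t sum  = Atomic::add(5, &counter);        // lock xaddl path
//   Atomic::inc(&counter);                          // lock addl $1 path
//   int32_t old  = Atomic::cmpxchg(7, &counter, 6); // lock cmpxchgl path
//   int32_t prev = Atomic::xchg(0, &counter);       // xchgl path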