/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
#define OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP

// Implementation of class atomic

// Atomic add for 32-bit values: load the current value, compute the sum in a
// scratch register, and retry with cas until the swap succeeds. Returns the
// updated value (add_value plus the old contents of *dest).
template <>
inline int32_t Atomic::specialized_add(int32_t add_value, volatile int32_t* dest) {
  intptr_t rv;
  __asm__ volatile(
    "1: \n\t"
    " ld     [%2], %%o2\n\t"
    " add    %1, %%o2, %%o3\n\t"
    " cas    [%2], %%o2, %%o3\n\t"
    " cmp    %%o2, %%o3\n\t"
    " bne    1b\n\t"
    " nop\n\t"
    " add    %1, %%o2, %0\n\t"
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}

// Atomic add for 64-bit values, using casx and the 64-bit condition codes (%xcc).
template <>
inline int64_t Atomic::specialized_add(int64_t add_value, volatile int64_t* dest) {
  intptr_t rv;
  __asm__ volatile(
    "1: \n\t"
    " ldx    [%2], %%o2\n\t"
    " add    %1, %%o2, %%o3\n\t"
    " casx   [%2], %%o2, %%o3\n\t"
    " cmp    %%o2, %%o3\n\t"
    " bne    %%xcc, 1b\n\t"
    " nop\n\t"
    " add    %1, %%o2, %0\n\t"
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}

// Atomic exchange for 32-bit values: swap exchanges a register with a memory
// word in a single instruction and leaves the old contents of *dest in rv.
template <>
inline int32_t Atomic::specialized_xchg(int32_t exchange_value, volatile int32_t* dest) {
  intptr_t rv = exchange_value;
  __asm__ volatile(
    " swap   [%2],%1\n\t"
    : "=r" (rv)
    : "0" (exchange_value) /* we use same register as for return value */,
      "r" (dest)
    : "memory");
  return rv;
}

// Atomic exchange for 64-bit values: SPARC has no 64-bit swap instruction, so
// loop with casx until the exchange succeeds. Returns the previous value of *dest.
template <>
inline int64_t Atomic::specialized_xchg(int64_t exchange_value, volatile int64_t* dest) {
  intptr_t rv = exchange_value;
  __asm__ volatile(
    "1:\n\t"
    " mov    %1, %%o3\n\t"
    " ldx    [%2], %%o2\n\t"
    " casx   [%2], %%o2, %%o3\n\t"
    " cmp    %%o2, %%o3\n\t"
    " bne    %%xcc, 1b\n\t"
    " nop\n\t"
    " mov    %%o2, %0\n\t"
    : "=r" (rv)
    : "r" (exchange_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}

// Atomic compare-and-exchange for 32-bit values: cas stores exchange_value into
// *dest if *dest equals compare_value, and always returns the previous value of *dest.
template <>
inline int32_t Atomic::specialized_cmpxchg(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  int32_t rv;
  __asm__ volatile(
    " cas    [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}

// Atomic compare-and-exchange for 64-bit values, using casx.
template <>
inline int64_t Atomic::specialized_cmpxchg(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  int64_t rv;
  __asm__ volatile(
    " casx   [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}

#endif // OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP