1 /* 2 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved. 3 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 4 * 5 * This code is free software; you can redistribute it and/or modify it 6 * under the terms of the GNU General Public License version 2 only, as 7 * published by the Free Software Foundation. 8 * 9 * This code is distributed in the hope that it will be useful, but WITHOUT 10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 12 * version 2 for more details (a copy is included in the LICENSE file that 13 * accompanied this code). 14 * 15 * You should have received a copy of the GNU General Public License version 16 * 2 along with this work; if not, write to the Free Software Foundation, 17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 18 * 19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 20 * or visit www.oracle.com if you need additional information or have any 21 * questions. 
 *
 */

#ifndef OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP
#define OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP

// Implementation of class atomic
//
// Inline-assembly implementations of the Atomic primitives (add, xchg,
// cmpxchg) for Linux on SPARC.  Each specialization uses GCC extended asm;
// %o2/%o3 are used as scratch registers and are listed in the clobbers,
// and "memory" is clobbered so the compiler does not cache values across
// the atomic operation.


// Atomically add add_value to *dest and return the updated value.
// Retry loop: load the old value into %o2, compute the sum in %o3, then
// 'cas' writes %o3 to [dest] only if [dest] still equals %o2.  On failure
// (cmp/bne sees %o2 != %o3, i.e. cas loaded a different current value)
// the loop restarts; the 'nop' fills the branch delay slot.  The final
// 'add' recomputes add_value + old into %0 as the return value.
template <>
inline int32_t Atomic::specialized_add<int32_t>(int32_t add_value, volatile int32_t* dest) {
  intptr_t rv;
  __asm__ volatile(
    "1: \n\t"
    " ld [%2], %%o2\n\t"
    " add %1, %%o2, %%o3\n\t"
    " cas [%2], %%o2, %%o3\n\t"
    " cmp %%o2, %%o3\n\t"
    " bne 1b\n\t"
    " nop\n\t"
    " add %1, %%o2, %0\n\t"
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}


// 64-bit variant of the add above: same retry loop using the extended-word
// forms (ldx/casx) and the 64-bit condition codes (%xcc) for the branch.
template <>
inline int64_t Atomic::specialized_add<int64_t>(int64_t add_value, volatile int64_t* dest) {
  intptr_t rv;
  __asm__ volatile(
    "1: \n\t"
    " ldx [%2], %%o2\n\t"
    " add %1, %%o2, %%o3\n\t"
    " casx [%2], %%o2, %%o3\n\t"
    " cmp %%o2, %%o3\n\t"
    " bne %%xcc, 1b\n\t"
    " nop\n\t"
    " add %1, %%o2, %0\n\t"
    : "=r" (rv)
    : "r" (add_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}


// Atomically store exchange_value into *dest and return the previous value.
// A single 'swap' instruction exchanges the register with memory.  The "0"
// constraint ties the input to output operand %0, so %1 and %0 name the
// same register: it holds exchange_value going in and the old *dest value
// coming out.
template <>
inline int32_t Atomic::specialized_xchg<int32_t>(int32_t exchange_value, volatile int32_t* dest) {
  intptr_t rv = exchange_value;
  __asm__ volatile(
    " swap [%2],%1\n\t"
    : "=r" (rv)
    : "0" (exchange_value) /* we use same register as for return value */, "r" (dest)
    : "memory");
  return rv;
}


// 64-bit exchange.  There is no 64-bit 'swap', so this is a casx retry
// loop: load the current value into %o2, attempt to replace it with
// exchange_value (copied into %o3 each iteration, since casx overwrites
// its source register on failure), and retry until the casx succeeds.
// The old value observed by the successful casx (%o2) is returned.
template <>
inline int64_t Atomic::specialized_xchg<int64_t>(int64_t exchange_value, volatile int64_t* dest) {
  intptr_t rv = exchange_value;
  __asm__ volatile(
    "1:\n\t"
    " mov %1, %%o3\n\t"
    " ldx [%2], %%o2\n\t"
    " casx [%2], %%o2, %%o3\n\t"
    " cmp %%o2, %%o3\n\t"
    " bne %%xcc, 1b\n\t"
    " nop\n\t"
    " mov %%o2, %0\n\t"
    : "=r" (rv)
    : "r" (exchange_value), "r" (dest)
    : "memory", "o2", "o3");
  return rv;
}


// Compare-and-exchange: if *dest == compare_value, store exchange_value;
// either way return the value that was in *dest before the operation.
// 'cas' does this in one instruction and leaves the previous memory value
// in %0 (tied to exchange_value via the "0" constraint).
// NOTE(review): the 'order' parameter is not consulted — presumably this
// relies on SPARC TSO plus the "memory" compiler barrier being sufficient
// for all requested orderings; confirm against the shared Atomic layer.
template <>
inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value, volatile int32_t* dest, int32_t compare_value, cmpxchg_memory_order order) {
  int32_t rv;
  __asm__ volatile(
    " cas [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}


// 64-bit compare-and-exchange: identical to the 32-bit version above but
// using the extended-word 'casx'.  The 'order' parameter is likewise
// unused here (see note on the 32-bit specialization).
template <>
inline int64_t Atomic::specialized_cmpxchg<int64_t>(int64_t exchange_value, volatile int64_t* dest, int64_t compare_value, cmpxchg_memory_order order) {
  int64_t rv;
  __asm__ volatile(
    " casx [%2], %3, %0"
    : "=r" (rv)
    : "0" (exchange_value), "r" (dest), "r" (compare_value)
    : "memory");
  return rv;
}

#endif // OS_CPU_LINUX_SPARC_VM_ATOMIC_LINUX_SPARC_INLINE_HPP