/*
 * Copyright (c) 1999, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
22 * 23 */ 24 25 #ifndef OS_CPU_LINUX_SPARC_ATOMIC_LINUX_SPARC_HPP 26 #define OS_CPU_LINUX_SPARC_ATOMIC_LINUX_SPARC_HPP 27 28 // Implementation of class atomic 29 30 template<size_t byte_size> 31 struct Atomic::PlatformAdd 32 : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> > 33 { 34 template<typename D, typename I> 35 D add_and_fetch(D volatile* dest, I add_value, atomic_memory_order order) const; 36 }; 37 38 template<> 39 template<typename D, typename I> 40 inline D Atomic::PlatformAdd<4>::add_and_fetch(D volatile* dest, I add_value, 41 atomic_memory_order order) const { 42 STATIC_ASSERT(4 == sizeof(I)); 43 STATIC_ASSERT(4 == sizeof(D)); 44 45 D rv; 46 __asm__ volatile( 47 "1: \n\t" 48 " ld [%2], %%o2\n\t" 49 " add %1, %%o2, %%o3\n\t" 50 " cas [%2], %%o2, %%o3\n\t" 51 " cmp %%o2, %%o3\n\t" 52 " bne 1b\n\t" 53 " nop\n\t" 54 " add %1, %%o2, %0\n\t" 55 : "=r" (rv) 56 : "r" (add_value), "r" (dest) 57 : "memory", "o2", "o3"); 58 return rv; 59 } 60 61 template<> 62 template<typename D, typename I> 63 inline D Atomic::PlatformAdd<8>::add_and_fetch(D volatile* dest, I add_value, 64 atomic_memory_order order) const { 65 STATIC_ASSERT(8 == sizeof(I)); 66 STATIC_ASSERT(8 == sizeof(D)); 67 68 D rv; 69 __asm__ volatile( 70 "1: \n\t" 71 " ldx [%2], %%o2\n\t" 72 " add %1, %%o2, %%o3\n\t" 73 " casx [%2], %%o2, %%o3\n\t" 74 " cmp %%o2, %%o3\n\t" 75 " bne %%xcc, 1b\n\t" 76 " nop\n\t" 77 " add %1, %%o2, %0\n\t" 78 : "=r" (rv) 79 : "r" (add_value), "r" (dest) 80 : "memory", "o2", "o3"); 81 return rv; 82 } 83 84 template<> 85 template<typename T> 86 inline T Atomic::PlatformXchg<4>::operator()(T volatile* dest, 87 T exchange_value, 88 atomic_memory_order order) const { 89 STATIC_ASSERT(4 == sizeof(T)); 90 T rv = exchange_value; 91 __asm__ volatile( 92 " swap [%2],%1\n\t" 93 : "=r" (rv) 94 : "0" (exchange_value) /* we use same register as for return value */, "r" (dest) 95 : "memory"); 96 return rv; 97 } 98 99 template<> 100 template<typename T> 101 inline T 
Atomic::PlatformXchg<8>::operator()(T volatile* dest, 102 T exchange_value, 103 atomic_memory_order order) const { 104 STATIC_ASSERT(8 == sizeof(T)); 105 T rv = exchange_value; 106 __asm__ volatile( 107 "1:\n\t" 108 " mov %1, %%o3\n\t" 109 " ldx [%2], %%o2\n\t" 110 " casx [%2], %%o2, %%o3\n\t" 111 " cmp %%o2, %%o3\n\t" 112 " bne %%xcc, 1b\n\t" 113 " nop\n\t" 114 " mov %%o2, %0\n\t" 115 : "=r" (rv) 116 : "r" (exchange_value), "r" (dest) 117 : "memory", "o2", "o3"); 118 return rv; 119 } 120 121 // No direct support for cmpxchg of bytes; emulate using int. 122 template<> 123 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {}; 124 125 template<> 126 template<typename T> 127 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value, 128 T volatile* dest, 129 T compare_value, 130 atomic_memory_order order) const { 131 STATIC_ASSERT(4 == sizeof(T)); 132 T rv; 133 __asm__ volatile( 134 " cas [%2], %3, %0" 135 : "=r" (rv) 136 : "0" (exchange_value), "r" (dest), "r" (compare_value) 137 : "memory"); 138 return rv; 139 } 140 141 template<> 142 template<typename T> 143 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value, 144 T volatile* dest, 145 T compare_value, 146 atomic_memory_order order) const { 147 STATIC_ASSERT(8 == sizeof(T)); 148 T rv; 149 __asm__ volatile( 150 " casx [%2], %3, %0" 151 : "=r" (rv) 152 : "0" (exchange_value), "r" (dest), "r" (compare_value) 153 : "memory"); 154 return rv; 155 } 156 157 #endif // OS_CPU_LINUX_SPARC_ATOMIC_LINUX_SPARC_HPP