/*
 * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP
#define OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP

#include "runtime/os.hpp"
#include "vm_version_arm.hpp"

// Implementation of class atomic

/*
 * Atomic long operations on 32-bit ARM
 * ARM v7 supports LDREXD/STREXD synchronization instructions so no problem.
 * ARM < v7 does not have explicit 64-bit atomic load/store capability.
 * However, gcc emits LDRD/STRD instructions on v5te and LDM/STM on v5t
 * when loading/storing 64 bits.
 * For non-MP machines (which is all we support for ARM < v7)
 * under current Linux distros these instructions appear atomic.
 * See section A3.5.3 of ARM Architecture Reference Manual for ARM v7.
 * Also, for cmpxchg64, if ARM < v7 we check for cmpxchg64 support in the
 * Linux kernel using _kuser_helper_version. See entry-armv.S in the Linux
 * kernel source or kernel_user_helpers.txt in Linux Doc.
 */

inline void Atomic::store    (jbyte    store_value, jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, jint*     dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, void*     dest) { *(void**)dest = store_value; }

inline void Atomic::store    (jbyte    store_value, volatile jbyte*    dest) { *dest = store_value; }
inline void Atomic::store    (jshort   store_value, volatile jshort*   dest) { *dest = store_value; }
inline void Atomic::store    (jint     store_value, volatile jint*     dest) { *dest = store_value; }
inline void Atomic::store_ptr(intptr_t store_value, volatile intptr_t* dest) { *dest = store_value; }
inline void Atomic::store_ptr(void*    store_value, volatile void*     dest) { *(void* volatile *)dest = store_value; }

inline jlong Atomic::load (const volatile jlong* src) {
  assert(((intx)src & (sizeof(jlong)-1)) == 0, "Atomic load jlong mis-aligned");
#ifdef AARCH64
  return *src;
#else
  return (*os::atomic_load_long_func)(src);
#endif
}

inline void Atomic::store (jlong value, volatile jlong* dest) {
  assert(((intx)dest & (sizeof(jlong)-1)) == 0, "Atomic store jlong mis-aligned");
#ifdef AARCH64
  *dest = value;
#else
  (*os::atomic_store_long_func)(value, dest);
#endif
}

inline void Atomic::store (jlong value, jlong* dest) {
  store(value, (volatile jlong*)dest);
}

// As per atomic.hpp all read-modify-write operations have to provide two-way
// barrier semantics. For AARCH64 we are using load-acquire-with-reservation and
// store-release-with-reservation. While load-acquire combined with store-release
// do not generally form two-way barriers, their use with reservations does - the
// ARMv8 architecture manual Section F "Barrier Litmus Tests" indicates they
// provide sequentially consistent semantics. All we need to add is an explicit
// barrier in the failure path of the cmpxchg operations (as these don't execute
// the store) - arguably this may be overly cautious as there is a very low
// likelihood that the hardware would pull loads/stores into the region guarded
// by the reservation.
//
// For ARMv7 we add explicit barriers in the stubs.
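
// Illustrative sketch only, assuming a GCC/Clang toolchain (for comparison,
// not used by the implementation below): the AArch64 ldaxr/stlxr retry loops
// that follow are semantically close to the compiler builtin
//
//   jint new_val = __atomic_add_fetch(dest, add_value, __ATOMIC_SEQ_CST);
//
// which, in the absence of LSE atomics, also expands to a
// load-exclusive/store-exclusive retry loop on ARMv8.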

inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef AARCH64
  jint val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[val], [%[dest]]\n\t"
    " add %w[val], %w[val], %w[add_val]\n\t"
    " stlxr %w[tmp], %w[val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [val] "=&r" (val), [tmp] "=&r" (tmp)
    : [add_val] "r" (add_value), [dest] "r" (dest)
    : "memory");
  return val;
#else
  return (*os::atomic_add_func)(add_value, dest);
#endif
}

inline void Atomic::inc(volatile jint* dest) {
  Atomic::add(1, (volatile jint *)dest);
}

inline void Atomic::dec(volatile jint* dest) {
  Atomic::add(-1, (volatile jint *)dest);
}

inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef AARCH64
  intptr_t val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[val], [%[dest]]\n\t"
    " add %[val], %[val], %[add_val]\n\t"
    " stlxr %w[tmp], %[val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [val] "=&r" (val), [tmp] "=&r" (tmp)
    : [add_val] "r" (add_value), [dest] "r" (dest)
    : "memory");
  return val;
#else
  return (intptr_t)Atomic::add((jint)add_value, (volatile jint*)dest);
#endif
}

inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void*)add_ptr(add_value, (volatile intptr_t*)dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  Atomic::add_ptr(1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  Atomic::add_ptr(-1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  inc_ptr((volatile intptr_t*)dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  dec_ptr((volatile intptr_t*)dest);
}


inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef AARCH64
  jint old_val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[old_val], [%[dest]]\n\t"
    " stlxr %w[tmp], %w[new_val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
#else
  return (*os::atomic_xchg_func)(exchange_value, dest);
#endif
}

inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
#ifdef AARCH64
  intptr_t old_val;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[old_val], [%[dest]]\n\t"
    " stlxr %w[tmp], %[new_val], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    : [old_val] "=&r" (old_val), [tmp] "=&r" (tmp)
    : [new_val] "r" (exchange_value), [dest] "r" (dest)
    : "memory");
  return old_val;
#else
  return (intptr_t)xchg((jint)exchange_value, (volatile jint*)dest);
#endif
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void*)xchg_ptr((intptr_t)exchange_value, (volatile intptr_t*)dest);
}

// The memory_order parameter is ignored - we always provide the strongest/most-conservative ordering

// No direct support for cmpxchg of bytes; emulate using int.
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

#ifndef AARCH64

inline jint reorder_cmpxchg_func(jint exchange_value,
                                 jint volatile* dest,
                                 jint compare_value) {
  // Warning: Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_func)(compare_value, exchange_value, dest);
}

inline jlong reorder_cmpxchg_long_func(jlong exchange_value,
                                       jlong volatile* dest,
                                       jlong compare_value) {
  assert(VM_Version::supports_cx8(), "Atomic compare and exchange jlong not supported on this architecture!");
  // Warning: Arguments are swapped to avoid moving them for kernel call
  return (*os::atomic_cmpxchg_long_func)(compare_value, exchange_value, dest);
}

#endif // !AARCH64

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
#ifdef AARCH64
  T rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %w[rv], [%[dest]]\n\t"
    " cmp %w[rv], %w[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %w[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  return cmpxchg_using_helper<jint>(reorder_cmpxchg_func, exchange_value, dest, compare_value);
#endif
}

template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                cmpxchg_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
#ifdef AARCH64
  T rv;
  int tmp;
  __asm__ volatile(
    "1:\n\t"
    " ldaxr %[rv], [%[dest]]\n\t"
    " cmp %[rv], %[cv]\n\t"
    " b.ne 2f\n\t"
    " stlxr %w[tmp], %[ev], [%[dest]]\n\t"
    " cbnz %w[tmp], 1b\n\t"
    " b 3f\n\t"
    "2:\n\t"
    " dmb sy\n\t"
    "3:\n\t"
    : [rv] "=&r" (rv), [tmp] "=&r" (tmp)
    : [ev] "r" (exchange_value), [dest] "r" (dest), [cv] "r" (compare_value)
    : "memory");
  return rv;
#else
  return cmpxchg_using_helper<jlong>(reorder_cmpxchg_long_func, exchange_value, dest, compare_value);
#endif
}

#endif // OS_CPU_LINUX_ARM_VM_ATOMIC_LINUX_ARM_HPP