1 /* 2 * Copyright (c) 2003, 2014, Oracle and/or its affiliates. All rights reserved. 3 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc. 4 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. 5 * 6 * This code is free software; you can redistribute it and/or modify it 7 * under the terms of the GNU General Public License version 2 only, as 8 * published by the Free Software Foundation. 9 * 10 * This code is distributed in the hope that it will be useful, but WITHOUT 11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 13 * version 2 for more details (a copy is included in the LICENSE file that 14 * accompanied this code). 15 * 16 * You should have received a copy of the GNU General Public License version 17 * 2 along with this work; if not, write to the Free Software Foundation, 18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 19 * 20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 21 * or visit www.oracle.com if you need additional information or have any 22 * questions. 23 * 24 */ 25 26 #ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP 27 #define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP 28 29 #include "runtime/atomic.hpp" 30 #include "runtime/os.hpp" 31 32 // Implementation of class atomic 33 34 #ifdef M68K 35 36 /* 37 * __m68k_cmpxchg 38 * 39 * Atomically store newval in *ptr if *ptr is equal to oldval for user space. 40 * Returns newval on success and oldval if no exchange happened. 41 * This implementation is processor specific and works on 42 * 68020 68030 68040 and 68060. 43 * 44 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS 45 * instruction. 46 * Using a kernelhelper would be better for arch complete implementation. 
 *
 */

// Raw 68020+ CAS: compare *ptr with oldval and, if equal, store newval.
// The "0" constraint ties ret's initial value to oldval; cas%.l then
// compares ret against *ptr and conditionally writes newval.  See the
// header comment above for the claimed return-value contract.
static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  __asm __volatile ("cas%.l %0,%2,%1"
                   : "=d" (ret), "+m" (*(ptr))
                   : "d" (newval), "0" (oldval));
  return ret;
}

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation.*/
static inline int m68k_compare_and_swap(volatile int *ptr,
                                        int oldval,
                                        int newval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        // The value already differs from what the caller expected.
        return prev;

      // NOTE(review): success is detected here (and in m68k_add_and_fetch)
      // by a return value of newval, matching the header comment on
      // __m68k_cmpxchg, while m68k_lock_test_and_set below instead tests
      // for a return of prev.  Both conventions cannot be right for the
      // same helper -- confirm against the CAS instruction semantics on
      // real hardware.
      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory and return the new value. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
      // Loop until success.

      int prev = *ptr;

      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and returns the previous
   contents of `*PTR'.  */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until success.
      int prev = *ptr;

      // NOTE(review): success is detected by a return of prev here, unlike
      // the '== newval' convention used above -- see the note in
      // m68k_compare_and_swap.
      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        return prev;
    }
}
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
// 0xffff0fc0 is the fixed address of the cmpxchg kernel user helper in the
// ARM Linux vector page (see Documentation/arm/kernel_user_helpers.txt in
// the kernel tree).
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)



/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.
Return the contents of
   `*PTR' before the operation.*/
static inline int arm_compare_and_swap(volatile int *ptr,
                                       int oldval,
                                       int newval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        // The value already differs from what the caller expected.
        return prev;

      // A zero return from the kernel helper means *ptr was updated.
      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
    }
}

/* Atomically add an int to memory and return the new value. */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.

      int prev = *ptr;

      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
        return prev + add_value;
    }
}

/* Atomically write VALUE into `*PTR' and returns the previous
   contents of `*PTR'.  */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.
      int prev = *ptr;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        return prev;
    }
}
#endif // ARM

// Plain 32-bit store; no memory barrier is issued here.
inline void Atomic::store(jint store_value, volatile jint* dest) {
  *dest = store_value;
}

// Plain pointer-sized store; no memory barrier is issued here.
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
  *dest = store_value;
}

// Atomically add add_value to *dest and return the new value.
// Dispatch: ARM uses the kernel-helper loop, M68K the CAS loop, and all
// other targets the GCC __sync builtin.
inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

// Pointer-sized variant of add(); same arch dispatch as above.
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

// void* variant; delegates to the intptr_t overload.
inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

// Atomic increment via add().
inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

// Atomic decrement, implemented as an add of -1.
inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

// Atomically exchange *dest with exchange_value, returning the old value.
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  jint result = __sync_lock_test_and_set (dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp).  However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier.  Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

// Pointer-sized exchange; same structure and barrier reasoning as xchg().
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is only an acquire barrier; follow it with a
  // full barrier to meet the Atomic contract (see xchg above).
  intptr_t result = __sync_lock_test_and_set (dest, exchange_value);
  __sync_synchronize();
  return result;
#endif // M68K
#endif // ARM
}

// void* variant; delegates to the intptr_t overload.
inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

// Atomically compare *dest with compare_value and, if equal, store
// exchange_value.  Returns the value of *dest before the operation.
inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

// 64-bit compare-and-swap.
// NOTE(review): unlike the jint variant there is no ARM/M68K dispatch
// here, so this relies on the 64-bit __sync_val_compare_and_swap builtin
// being available on those targets -- confirm for 32-bit builds.
inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value) {

  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

// Pointer-sized compare-and-swap; same arch dispatch as the jint variant.
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

// void* variant; delegates to the intptr_t overload.
inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value) {

  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value);
}

// 64-bit load.  Goes through os::atomic_copy64 rather than a plain load --
// presumably to keep 64-bit accesses atomic on 32-bit targets; confirm
// against the os_linux_zero implementation.
inline jlong Atomic::load(volatile jlong* src) {
  volatile jlong dest;
  os::atomic_copy64(src, &dest);
  return dest;
}

// 64-bit store via os::atomic_copy64 (non-volatile destination overload).
inline void Atomic::store(jlong store_value, jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, (volatile jlong*)dest);
}

// 64-bit store via os::atomic_copy64 (volatile destination overload).
inline void Atomic::store(jlong store_value, volatile jlong* dest) {
  os::atomic_copy64((volatile jlong*)&store_value, dest);
}

#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP