/*
 * Copyright (c) 2003, 2010, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008 Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP
#define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP

#include "orderAccess_linux_zero.inline.hpp"
#include "runtime/atomic.hpp"
#include "runtime/os.hpp"
#include "vm_version_zero.hpp"

// Implementation of class atomic
//
// This is the Zero (interpreter-only, no-assembler) Linux port.  On most
// targets the atomic operations below are implemented with the GCC __sync
// builtins; ARM and M68K instead use the hand-rolled helpers defined first,
// because those targets (at the time this was written) lacked usable
// builtin support for these operations.

#ifdef M68K

/*
 * __m68k_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval, for user
 * space.  Per the original documentation it returns newval on success and
 * oldval if no exchange happened.
 *
 * NOTE(review): the m68k CAS instruction nominally leaves the compare
 * operand (seeded here with oldval via the "0" constraint) unchanged on
 * success and loads the current memory value into it on failure, which
 * would make the return value oldval on success -- verify against the
 * M68000 family ISA manual.  The three callers below test the return value
 * in three different ways (== newval, == prev + add_value, == prev); each
 * is annotated at its use site.
 *
 * This implementation is processor specific and works on the
 * 68020, 68030, 68040 and 68060.
 *
 * It will not work on ColdFire, 68000 and 68010 since they lack the CAS
 * instruction.
 * Using a kernel helper would be better for an architecture-complete
 * implementation.
 *
 */

static inline int __m68k_cmpxchg(int oldval, int newval, volatile int *ptr) {
  int ret;
  // cas.l: compare ret with *ptr; if equal, store newval into *ptr,
  // otherwise load *ptr into ret.  "+m" keeps the memory operand live so
  // the compiler cannot cache *ptr in a register across the instruction.
  __asm __volatile ("cas%.l %0,%2,%1"
                    : "=d" (ret), "+m" (*(ptr))
                    : "d" (newval), "0" (oldval));
  return ret;
}

/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int m68k_compare_and_swap(volatile int *ptr,
                                        int oldval,
                                        int newval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      // NOTE(review): success is detected by comparing the helper's return
      // value with newval, relying on the "returns newval on success"
      // contract documented on __m68k_cmpxchg -- confirm (see note there).
      if (__m68k_cmpxchg (prev, newval, ptr) == newval)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
  }
}

/* Atomically add an int to memory and return the new value. */
static inline int m68k_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
      // Loop until success.

      int prev = *ptr;

      if (__m68k_cmpxchg (prev, prev + add_value, ptr) == prev + add_value)
        return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int m68k_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until success.
      int prev = *ptr;

      // NOTE(review): unlike m68k_compare_and_swap above, success here is
      // detected by comparing against prev (the value we expected), not
      // newval -- the two call sites assume opposite return conventions
      // from __m68k_cmpxchg; verify which one matches the hardware.
      if (__m68k_cmpxchg (prev, newval, ptr) == prev)
        return prev;
  }
}
#endif // M68K

#ifdef ARM

/*
 * __kernel_cmpxchg
 *
 * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
 * Return zero if *ptr was changed or non-zero if no exchange happened.
 * The C flag is also set if *ptr was changed to allow for assembly
 * optimization in the calling code.
 *
 */

typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
// 0xffff0fc0 is the fixed address of the cmpxchg entry in the ARM Linux
// kernel user-helper page (see Documentation/arm/kernel_user_helpers.txt);
// the kernel guarantees an atomic compare-and-swap at that address on all
// ARM variants.
#define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)



/* Perform an atomic compare and swap: if the current value of `*PTR'
   is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
   `*PTR' before the operation. */
static inline int arm_compare_and_swap(volatile int *ptr,
                                       int oldval,
                                       int newval) {
  for (;;) {
      int prev = *ptr;
      if (prev != oldval)
        return prev;

      // __kernel_cmpxchg returns zero on success.
      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        // Success.
        return prev;

      // We failed even though prev == oldval.  Try again.
  }
}

/* Atomically add an int to memory and return the new value. */
static inline int arm_add_and_fetch(volatile int *ptr, int add_value) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.

      int prev = *ptr;

      if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
        return prev + add_value;
  }
}

/* Atomically write VALUE into `*PTR' and return the previous
   contents of `*PTR'. */
static inline int arm_lock_test_and_set(volatile int *ptr, int newval) {
  for (;;) {
      // Loop until a __kernel_cmpxchg succeeds.
      int prev = *ptr;

      if (__kernel_cmpxchg (prev, newval, ptr) == 0)
        return prev;
  }
}
#endif // ARM

// Plain volatile store.  Atomic for a naturally aligned jint; carries no
// memory-ordering guarantee of its own (ordered variants live elsewhere --
// see orderAccess_linux_zero.inline.hpp).
inline void Atomic::store(jint store_value, volatile jint* dest) {
  *dest = store_value;
}

// Pointer-sized plain store; same caveats as store() above.
inline void Atomic::store_ptr(intptr_t store_value, intptr_t* dest) {
  *dest = store_value;
}

// Atomically add add_value to *dest and return the NEW value.
// Dispatch: ARM and M68K use the hand-rolled helpers above; all other
// targets use the GCC builtin, which is a full barrier.
inline jint Atomic::add(jint add_value, volatile jint* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

// Pointer-sized add-and-fetch.  On ARM/M68K this routes intptr_t through
// the int-based helpers, which is only valid because those Zero targets
// are 32-bit (intptr_t == int there).
inline intptr_t Atomic::add_ptr(intptr_t add_value, volatile intptr_t* dest) {
#ifdef ARM
  return arm_add_and_fetch(dest, add_value);
#else
#ifdef M68K
  return m68k_add_and_fetch(dest, add_value);
#else
  return __sync_add_and_fetch(dest, add_value);
#endif // M68K
#endif // ARM
}

// void* convenience overload; delegates to the intptr_t variant.
inline void* Atomic::add_ptr(intptr_t add_value, volatile void* dest) {
  return (void *) add_ptr(add_value, (volatile intptr_t *) dest);
}

// Atomic increment/decrement, expressed as add(+/-1).
inline void Atomic::inc(volatile jint* dest) {
  add(1, dest);
}

inline void Atomic::inc_ptr(volatile intptr_t* dest) {
  add_ptr(1, dest);
}

inline void Atomic::inc_ptr(volatile void* dest) {
  add_ptr(1, dest);
}

inline void Atomic::dec(volatile jint* dest) {
  add(-1, dest);
}

inline void Atomic::dec_ptr(volatile intptr_t* dest) {
  add_ptr(-1, dest);
}

inline void Atomic::dec_ptr(volatile void* dest) {
  add_ptr(-1, dest);
}

// Atomically store exchange_value into *dest and return the previous value.
inline jint Atomic::xchg(jint exchange_value, volatile jint* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  //
  // NOTE(review): GCC documents __sync_lock_test_and_set as an acquire
  // barrier only, not a full barrier -- confirm that matches the memory
  // ordering Atomic::xchg's callers expect.
  return __sync_lock_test_and_set (dest, exchange_value);
#endif // M68K
#endif // ARM
}

// Pointer-sized exchange; same dispatch and caveats as xchg() above.
inline intptr_t Atomic::xchg_ptr(intptr_t exchange_value,
                                 volatile intptr_t* dest) {
#ifdef ARM
  return arm_lock_test_and_set(dest, exchange_value);
#else
#ifdef M68K
  return m68k_lock_test_and_set(dest, exchange_value);
#else
  return __sync_lock_test_and_set (dest, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::xchg_ptr(void* exchange_value, volatile void* dest) {
  return (void *) xchg_ptr((intptr_t) exchange_value,
                           (volatile intptr_t*) dest);
}

// Atomic compare-and-swap: if *dest == compare_value, store exchange_value;
// either way, return the value *dest held before the operation.
inline jint Atomic::cmpxchg(jint exchange_value,
                            volatile jint* dest,
                            jint compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

// 64-bit compare-and-swap.
// NOTE(review): unlike the 32-bit paths, this always uses the GCC builtin,
// even on ARM/M68K -- confirm the toolchains for those targets provide an
// 8-byte __sync_val_compare_and_swap.
inline jlong Atomic::cmpxchg(jlong exchange_value,
                             volatile jlong* dest,
                             jlong compare_value) {

  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

// Pointer-sized compare-and-swap; same dispatch as the jint variant.
inline intptr_t Atomic::cmpxchg_ptr(intptr_t exchange_value,
                                    volatile intptr_t* dest,
                                    intptr_t compare_value) {
#ifdef ARM
  return arm_compare_and_swap(dest, compare_value, exchange_value);
#else
#ifdef M68K
  return m68k_compare_and_swap(dest, compare_value, exchange_value);
#else
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
#endif // M68K
#endif // ARM
}

inline void* Atomic::cmpxchg_ptr(void* exchange_value,
                                 volatile void* dest,
                                 void* compare_value) {

  return (void *) cmpxchg_ptr((intptr_t) exchange_value,
                              (volatile intptr_t*) dest,
                              (intptr_t) compare_value);
}

#endif // OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_INLINE_HPP