< prev index next >

src/hotspot/os_cpu/linux_zero/atomic_linux_zero.hpp

Print this page
rev 48027 : 8192123: Zero should use compiler built-ins for atomics on linux-arm


  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
  27 #define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
  28 
  29 #include "runtime/os.hpp"
  30 
  31 // Implementation of class atomic
  32 
  33 #ifdef ARM
  34 
  35 /*
  36  * __kernel_cmpxchg
  37  *
  38  * Atomically store newval in *ptr if *ptr is equal to oldval for user space.
  39  * Return zero if *ptr was changed or non-zero if no exchange happened.
  40  * The C flag is also set if *ptr was changed to allow for assembly
  41  * optimization in the calling code.
  42  *
  43  */
  44 
  45 typedef int (__kernel_cmpxchg_t)(int oldval, int newval, volatile int *ptr);
  46 #define __kernel_cmpxchg (*(__kernel_cmpxchg_t *) 0xffff0fc0)
  47 
  48 
  49 
  50 /* Perform an atomic compare and swap: if the current value of `*PTR'
  51    is OLDVAL, then write NEWVAL into `*PTR'.  Return the contents of
  52    `*PTR' before the operation.*/
// Implemented on top of the __kernel_cmpxchg user helper (fixed address,
// see the #define above).  The helper can fail spuriously (returns
// non-zero even when *ptr == oldval), hence the retry loop.
  53 static inline int arm_compare_and_swap(int newval,
  54                                        volatile int *ptr,
  55                                        int oldval) {
  56   for (;;) {
      // Snapshot the current value; if it no longer equals oldval the
      // CAS has lost and the conflicting value is returned to the caller.
  57       int prev = *ptr;
  58       if (prev != oldval)
  59         return prev;
  60 
      // Zero return from the helper means newval was stored.
  61       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
  62         // Success.
  63         return prev;
  64 
  65       // We failed even though prev == oldval.  Try again.
  66     }
  67 }
  68 
  69 /* Atomically add an int to memory.  */
// Returns the NEW value (prev + add_value), matching add_and_fetch
// semantics.  Built as a read / CAS retry loop on __kernel_cmpxchg.
  70 static inline int arm_add_and_fetch(int add_value, volatile int *ptr) {
  71   for (;;) {
  72       // Loop until a __kernel_cmpxchg succeeds.
  73 
  74       int prev = *ptr;
  75 
      // Attempt to publish prev + add_value; retry if another thread
      // changed *ptr (or the helper failed) in the meantime.
  76       if (__kernel_cmpxchg (prev, prev + add_value, ptr) == 0)
  77         return prev + add_value;
  78     }
  79 }
  80 
  81 /* Atomically write VALUE into `*PTR' and returns the previous
  82    contents of `*PTR'.  */
// Unconditional atomic exchange expressed as a CAS retry loop:
// keep trying until the snapshotted prev is successfully replaced.
  83 static inline int arm_lock_test_and_set(int newval, volatile int *ptr) {
  84   for (;;) {
  85       // Loop until a __kernel_cmpxchg succeeds.
  86       int prev = *ptr;
  87 
  88       if (__kernel_cmpxchg (prev, newval, ptr) == 0)
  89         return prev;
  90     }
  91 }
  92 #endif // ARM
  93 
// Size-dispatched atomic add.  Only add_and_fetch is defined here; the
// AddAndFetch CRTP base (declared in atomic.hpp, not visible in this
// file) presumably derives the remaining add forms from it.
  94 template<size_t byte_size>
  95 struct Atomic::PlatformAdd
  96   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  97 {
  98   template<typename I, typename D>
  99   D add_and_fetch(I add_value, D volatile* dest) const;
 100 };
 101 
// 4-byte atomic add-and-fetch: on ARM route through the kernel-helper
// based arm_add_and_fetch; elsewhere use the GCC __sync built-in,
// which is a full memory barrier.
 102 template<>
 103 template<typename I, typename D>
 104 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
 105   STATIC_ASSERT(4 == sizeof(I));
 106   STATIC_ASSERT(4 == sizeof(D));
 107 
 108 #ifdef ARM
 109   return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
 110 #else
 111   return __sync_add_and_fetch(dest, add_value);
 112 #endif // ARM
 113 }
 114 
// 8-byte atomic add-and-fetch: always the compiler built-in (no ARM
// kernel-helper path exists for 64-bit operands).
 115 template<>
 116 template<typename I, typename D>
 117 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
 118   STATIC_ASSERT(8 == sizeof(I));
 119   STATIC_ASSERT(8 == sizeof(D));
 120 
 121   return __sync_add_and_fetch(dest, add_value);
 122 }
 123 
// 4-byte atomic exchange; returns the previous contents of *dest.
 124 template<>
 125 template<typename T>
 126 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
 127                                              T volatile* dest) const {
 128   STATIC_ASSERT(4 == sizeof(T));
 129 #ifdef ARM
      // ARM: exchange via the __kernel_cmpxchg-based helper above.
 130   return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
 131 #else
 132   // __sync_lock_test_and_set is a bizarrely named atomic exchange
 133   // operation.  Note that some platforms only support this with the
 134   // limitation that the only valid value to store is the immediate
 135   // constant 1.  There is a test for this in JNI_CreateJavaVM().
 136   T result = __sync_lock_test_and_set (dest, exchange_value);
 137   // All atomic operations are expected to be full memory barriers
 138   // (see atomic.hpp). However, __sync_lock_test_and_set is not
 139   // a full memory barrier, but an acquire barrier. Hence, this added
 140   // barrier.
 141   __sync_synchronize();
 142   return result;
 143 #endif // ARM
 144 }
 145 
// 8-byte atomic exchange.  As in the 4-byte case, __sync_lock_test_and_set
// is only an acquire barrier, so __sync_synchronize() is added to provide
// the full-barrier semantics atomics are expected to have.
 146 template<>
 147 template<typename T>
 148 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
 149                                              T volatile* dest) const {
 150   STATIC_ASSERT(8 == sizeof(T));
 151   T result = __sync_lock_test_and_set (dest, exchange_value);
 152   __sync_synchronize();
 153   return result;
 154 }
 155 
 156 // No direct support for cmpxchg of bytes; emulate using int.
// CmpxchgByteUsingInt (atomic.hpp) supplies the emulation.
 157 template<>
 158 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
 159 
// 4-byte compare-and-swap; returns the value of *dest before the
// operation.  The 'order' argument is ignored here: both paths are
// conservatively full-barrier.
 160 template<>
 161 template<typename T>
 162 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
 163                                                 T volatile* dest,
 164                                                 T compare_value,
 165                                                 cmpxchg_memory_order order) const {
 166   STATIC_ASSERT(4 == sizeof(T));
 167 #ifdef ARM
 168   return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
 169 #else
 170   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 171 #endif // ARM
 172 }
 173 
// 8-byte compare-and-swap via the compiler built-in (full barrier);
// 'order' is ignored, as in the 4-byte specialization.
 174 template<>
 175 template<typename T>
 176 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
 177                                                 T volatile* dest,
 178                                                 T compare_value,
 179                                                 cmpxchg_memory_order order) const {
 180   STATIC_ASSERT(8 == sizeof(T));
 181   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 182 }
 183 
// 64-bit load: copy through os::atomic_copy64 (defined per-platform in
// the os layer, not visible here) into a local, then reinterpret as T.
// Presumably this guarantees a tear-free read on 32-bit targets — the
// helper's implementation is where that atomicity must come from.
 184 template<>
 185 template<typename T>
 186 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
 187   STATIC_ASSERT(8 == sizeof(T));
 188   volatile jlong dest;
 189   os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
 190   return PrimitiveConversions::cast<T>(dest);
 191 }


  13  * version 2 for more details (a copy is included in the LICENSE file that
  14  * accompanied this code).
  15  *
  16  * You should have received a copy of the GNU General Public License version
  17  * 2 along with this work; if not, write to the Free Software Foundation,
  18  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  19  *
  20  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  21  * or visit www.oracle.com if you need additional information or have any
  22  * questions.
  23  *
  24  */
  25 
  26 #ifndef OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
  27 #define OS_CPU_LINUX_ZERO_VM_ATOMIC_LINUX_ZERO_HPP
  28 
  29 #include "runtime/os.hpp"
  30 
  31 // Implementation of class atomic
  32 





























































// Size-dispatched atomic add.  Only add_and_fetch is defined here; the
// AddAndFetch CRTP base (declared in atomic.hpp, not visible in this
// file) presumably derives the remaining add forms from it.
  33 template<size_t byte_size>
  34 struct Atomic::PlatformAdd
  35   : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  36 {
  37   template<typename I, typename D>
  38   D add_and_fetch(I add_value, D volatile* dest) const;
  39 };
  40 
// 4-byte atomic add-and-fetch via the GCC __sync built-in, which is a
// full memory barrier (the ARM kernel-helper path was removed by this
// change; built-ins are used on all Zero targets).
  41 template<>
  42 template<typename I, typename D>
  43 inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
  44   STATIC_ASSERT(4 == sizeof(I));
  45   STATIC_ASSERT(4 == sizeof(D));
  46 



  47   return __sync_add_and_fetch(dest, add_value);

  48 }
  49 
// 8-byte atomic add-and-fetch via the compiler built-in (full barrier).
  50 template<>
  51 template<typename I, typename D>
  52 inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
  53   STATIC_ASSERT(8 == sizeof(I));
  54   STATIC_ASSERT(8 == sizeof(D));

  55   return __sync_add_and_fetch(dest, add_value);
  56 }
  57 
// 4-byte atomic exchange; returns the previous contents of *dest.
  58 template<>
  59 template<typename T>
  60 inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
  61                                              T volatile* dest) const {
  62   STATIC_ASSERT(4 == sizeof(T));



  63   // __sync_lock_test_and_set is a bizarrely named atomic exchange
  64   // operation.  Note that some platforms only support this with the
  65   // limitation that the only valid value to store is the immediate
  66   // constant 1.  There is a test for this in JNI_CreateJavaVM().
  67   T result = __sync_lock_test_and_set (dest, exchange_value);
  68   // All atomic operations are expected to be full memory barriers
  69   // (see atomic.hpp). However, __sync_lock_test_and_set is not
  70   // a full memory barrier, but an acquire barrier. Hence, this added
  71   // barrier.
  72   __sync_synchronize();
  73   return result;

  74 }
  75 
// 8-byte atomic exchange.  As in the 4-byte case, __sync_lock_test_and_set
// is only an acquire barrier, so __sync_synchronize() is added to provide
// the full-barrier semantics atomics are expected to have.
  76 template<>
  77 template<typename T>
  78 inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
  79                                              T volatile* dest) const {
  80   STATIC_ASSERT(8 == sizeof(T));
  81   T result = __sync_lock_test_and_set (dest, exchange_value);
  82   __sync_synchronize();
  83   return result;
  84 }
  85 
  86 // No direct support for cmpxchg of bytes; emulate using int.
// CmpxchgByteUsingInt (atomic.hpp) supplies the emulation.
  87 template<>
  88 struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};
  89 
// 4-byte compare-and-swap; returns the value of *dest before the
// operation.  The 'order' argument is ignored: the built-in is
// conservatively a full barrier.
  90 template<>
  91 template<typename T>
  92 inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
  93                                                 T volatile* dest,
  94                                                 T compare_value,
  95                                                 cmpxchg_memory_order order) const {
  96   STATIC_ASSERT(4 == sizeof(T));



  97   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);

  98 }
  99 
// 8-byte compare-and-swap via the compiler built-in (full barrier);
// 'order' is ignored, as in the 4-byte specialization.
 100 template<>
 101 template<typename T>
 102 inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
 103                                                 T volatile* dest,
 104                                                 T compare_value,
 105                                                 cmpxchg_memory_order order) const {
 106   STATIC_ASSERT(8 == sizeof(T));
 107   return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
 108 }
 109 
// 64-bit load: copy through os::atomic_copy64 (defined per-platform in
// the os layer, not visible here) into a local, then reinterpret as T.
// Presumably this guarantees a tear-free read on 32-bit targets — the
// helper's implementation is where that atomicity must come from.
 110 template<>
 111 template<typename T>
 112 inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
 113   STATIC_ASSERT(8 == sizeof(T));
 114   volatile jlong dest;
 115   os::atomic_copy64(reinterpret_cast<const volatile jlong*>(src), reinterpret_cast<volatile jlong*>(&dest));
 116   return PrimitiveConversions::cast<T>(dest);
 117 }
< prev index next >