/*
 * Copyright (c) 2003, 2019, Oracle and/or its affiliates. All rights reserved.
 * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef OS_CPU_LINUX_ZERO_ATOMIC_LINUX_ZERO_HPP
#define OS_CPU_LINUX_ZERO_ATOMIC_LINUX_ZERO_HPP

#include "runtime/os.hpp"

// Implementation of class Atomic

template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
};

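// 4-byte atomic add. Unlike __sync_lock_test_and_set below, the
// __sync arithmetic builtins are full memory barriers, so no extra
// fence is required here.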
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(I));
  STATIC_ASSERT(4 == sizeof(D));

  return __sync_add_and_fetch(dest, add_value);
}

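// 8-byte atomic add; identical semantics to the 4-byte variant above.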
template<>
template<typename I, typename D>
inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
                                               atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(I));
  STATIC_ASSERT(8 == sizeof(D));

  return __sync_add_and_fetch(dest, add_value);
}

template<>
template<typename T>
inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  // __sync_lock_test_and_set is a bizarrely named atomic exchange
  // operation.  Note that some platforms only support this with the
  // limitation that the only valid value to store is the immediate
  // constant 1.  There is a test for this in JNI_CreateJavaVM().
  T result = __sync_lock_test_and_set(dest, exchange_value);
  // All atomic operations are expected to be full memory barriers
  // (see atomic.hpp). However, __sync_lock_test_and_set is not
  // a full memory barrier, but an acquire barrier. Hence, this added
  // barrier.
  __sync_synchronize();
  return result;
}

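// 8-byte exchange. As explained above, __sync_lock_test_and_set is
// only an acquire barrier, so the trailing __sync_synchronize()
// restores the full-barrier semantics Atomic requires.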
template<>
template<typename T>
inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
                                             T volatile* dest,
                                             atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  T result = __sync_lock_test_and_set(dest, exchange_value);
  __sync_synchronize();
  return result;
}

// No direct support for cmpxchg of bytes; emulate using int.
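// CmpxchgByteUsingInt (shared code in runtime/atomic.hpp) performs the
// exchange with a 4-byte cmpxchg on the aligned word containing the
// byte, retrying if a neighboring byte changes concurrently.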
template<>
struct Atomic::PlatformCmpxchg<1> : Atomic::CmpxchgByteUsingInt {};

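// 4-byte compare-and-exchange. __sync_val_compare_and_swap is a full
// barrier and returns the value of *dest observed just before the
// operation; callers compare it with compare_value to detect success.
//
// Illustrative use via the public Atomic API (hypothetical caller, not
// part of this header; assumes the exchange-value-first argument order
// used by Atomic::cmpxchg in this JDK generation):
//
//   volatile int32_t claimed = 0;
//   int32_t prev = Atomic::cmpxchg((int32_t)1, &claimed, (int32_t)0);
//   // prev == 0 iff this thread performed the 0 -> 1 transition.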
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(4 == sizeof(T));
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

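// 8-byte variant; same full-barrier semantics as the 4-byte case.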
template<>
template<typename T>
inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                T volatile* dest,
                                                T compare_value,
                                                atomic_memory_order order) const {
  STATIC_ASSERT(8 == sizeof(T));
  return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
}

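// 64-bit load and store. Zero also runs on 32-bit platforms, where
// plain 64-bit memory accesses are not guaranteed to be atomic, so
// both operations go through os::atomic_copy64() (defined in
// os_linux_zero.hpp), which uses CPU-specific sequences where needed.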
template<>
template<typename T>
inline T Atomic::PlatformLoad<8>::operator()(T const volatile* src) const {
  STATIC_ASSERT(8 == sizeof(T));
  volatile int64_t dest;
  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(src), reinterpret_cast<volatile int64_t*>(&dest));
  return PrimitiveConversions::cast<T>(dest);
}

template<>
template<typename T>
inline void Atomic::PlatformStore<8>::operator()(T store_value,
                                                 T volatile* dest) const {
  STATIC_ASSERT(8 == sizeof(T));
  os::atomic_copy64(reinterpret_cast<const volatile int64_t*>(&store_value), reinterpret_cast<volatile int64_t*>(dest));
}

#endif // OS_CPU_LINUX_ZERO_ATOMIC_LINUX_ZERO_HPP