12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
27 #define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
28
29 #include "vm_version_aarch64.hpp"
30
31 // Implementation of class atomic
32
33 #define FULL_MEM_BARRIER __sync_synchronize()
34 #define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE);
35 #define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
36
// Atomic add for AArch64 with "conservative" (two-way full fence) semantics.
// The add is performed with release ordering (orders all earlier accesses
// before the atomic store) and is followed by a full barrier (orders all
// later accesses after it).  On AArch64 a release RMW alone is not enough:
// a full barrier is required after the atomic store for conservative
// ordering.  See https://patchwork.kernel.org/patch/3575821/.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  // Returns the updated value (*dest + add_value).
  // NOTE(review): 'order' is ignored here — every add is promoted to the
  // strongest (conservative) ordering on this port.
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }
};
48
49 template<size_t byte_size>
50 template<typename T>
51 inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
52 T volatile* dest,
53 atomic_memory_order order) const {
54 STATIC_ASSERT(byte_size == sizeof(T));
55 T res = __sync_lock_test_and_set(dest, exchange_value);
56 FULL_MEM_BARRIER;
57 return res;
58 }
59
60 template<size_t byte_size>
61 template<typename T>
62 inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
63 T volatile* dest,
64 T compare_value,
65 atomic_memory_order order) const {
66 STATIC_ASSERT(byte_size == sizeof(T));
67 if (order == memory_order_relaxed) {
68 T value = compare_value;
69 __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
70 __ATOMIC_RELAXED, __ATOMIC_RELAXED);
71 return value;
72 } else {
73 return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
74 }
75 }
76
77 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
|
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
13 * version 2 for more details (a copy is included in the LICENSE file that
14 * accompanied this code).
15 *
16 * You should have received a copy of the GNU General Public License version
17 * 2 along with this work; if not, write to the Free Software Foundation,
18 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
19 *
20 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
21 * or visit www.oracle.com if you need additional information or have any
22 * questions.
23 *
24 */
25
26 #ifndef OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
27 #define OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
28
29 #include "vm_version_aarch64.hpp"
30
31 // Implementation of class atomic
32 // Note that memory_order_conservative requires a full barrier after atomic stores.
33 // See https://patchwork.kernel.org/patch/3575821/
34
35 #define FULL_MEM_BARRIER __sync_synchronize()
36 #define READ_MEM_BARRIER __atomic_thread_fence(__ATOMIC_ACQUIRE);
37 #define WRITE_MEM_BARRIER __atomic_thread_fence(__ATOMIC_RELEASE);
38
// Atomic add for AArch64 with "conservative" (two-way full fence) semantics.
// The add is performed with release ordering (orders all earlier accesses
// before the atomic store) and is followed by a full barrier (orders all
// later accesses after it).  As noted above, conservative ordering on
// AArch64 requires a full barrier after the atomic store — a release RMW
// alone is not sufficient.
template<size_t byte_size>
struct Atomic::PlatformAdd
  : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
{
  // Returns the updated value (*dest + add_value).
  // NOTE(review): 'order' is ignored here — every add is promoted to the
  // strongest (conservative) ordering on this port.
  template<typename I, typename D>
  D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const {
    D res = __atomic_add_fetch(dest, add_value, __ATOMIC_RELEASE);
    FULL_MEM_BARRIER;
    return res;
  }
};
50
// Atomic exchange with conservative (two-way full fence) semantics:
// the exchange is performed with release ordering (orders all earlier
// accesses before the atomic store) and followed by a full barrier
// (orders all later accesses after it) — the same pattern PlatformAdd
// uses.  Returns the previous value of *dest.
// NOTE(review): 'order' is ignored — every xchg is promoted to the
// strongest (conservative) ordering on this port.
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformXchg<byte_size>::operator()(T exchange_value,
                                                     T volatile* dest,
                                                     atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  T res = __atomic_exchange_n(dest, exchange_value, __ATOMIC_RELEASE);
  FULL_MEM_BARRIER;
  return res;
}
61
// Atomic compare-and-exchange.  Returns the value observed at *dest:
// equal to compare_value on success, the conflicting value on failure
// (__atomic_compare_exchange writes the observed value back into
// 'value' when the CAS fails, so 'value' is correct either way).
//
// memory_order_relaxed emits a plain relaxed CAS.  Any stronger order
// is treated conservatively: full barriers on both sides of a relaxed
// CAS give the two-way fence required (see the note at the top of this
// file — a full barrier is needed after atomic stores on AArch64).
template<size_t byte_size>
template<typename T>
inline T Atomic::PlatformCmpxchg<byte_size>::operator()(T exchange_value,
                                                        T volatile* dest,
                                                        T compare_value,
                                                        atomic_memory_order order) const {
  STATIC_ASSERT(byte_size == sizeof(T));
  if (order == memory_order_relaxed) {
    T value = compare_value;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    return value;
  } else {
    T value = compare_value;
    FULL_MEM_BARRIER;
    __atomic_compare_exchange(dest, &value, &exchange_value, /*weak*/false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
    FULL_MEM_BARRIER;
    return value;
  }
}
83
84 #endif // OS_CPU_LINUX_AARCH64_ATOMIC_LINUX_AARCH64_HPP
|