< prev index next >

src/hotspot/os_cpu/linux_arm/atomic_linux_arm.hpp

Print this page
rev 49898 : 8202080: Introduce ordering semantics for Atomic::add and other RMW atomics
Reviewed-by:

*** 1,7 ****
  /*
! * Copyright (c) 2008, 2017, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
--- 1,7 ----
  /*
! * Copyright (c) 2008, 2018, Oracle and/or its affiliates. All rights reserved.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
  * published by the Free Software Foundation.
*** 79,94 ****
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
    template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest) const;
  };
  
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
  #ifdef AARCH64
    D val;
    int tmp;
--- 79,95 ----
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
    template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
  };
  
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
!                                                atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
  #ifdef AARCH64
    D val;
    int tmp;
*** 108,118 ****
  }
  
  #ifdef AARCH64
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
    D val;
    int tmp;
    __asm__ volatile(
--- 109,120 ----
  }
  
  #ifdef AARCH64
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
!                                                atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
    D val;
    int tmp;
    __asm__ volatile(
*** 129,139 ****
  #endif
  
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(T));
  #ifdef AARCH64
    T old_val;
    int tmp;
    __asm__ volatile(
--- 131,142 ----
  #endif
  
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest,
!                                              atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
  #ifdef AARCH64
    T old_val;
    int tmp;
    __asm__ volatile(
*** 152,162 ****
  
  #ifdef AARCH64
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
!                                              T volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(T));
    T old_val;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
--- 155,166 ----
  
  #ifdef AARCH64
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
!                                              T volatile* dest,
!                                              atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T))
    T old_val;
    int tmp;
    __asm__ volatile(
      "1:\n\t"
*** 198,208 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 cmpxchg_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
  #ifdef AARCH64
    T rv;
    int tmp;
    __asm__ volatile(
--- 202,212 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
  #ifdef AARCH64
    T rv;
    int tmp;
    __asm__ volatile(
*** 228,238 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 cmpxchg_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
  #ifdef AARCH64
    T rv;
    int tmp;
    __asm__ volatile(
--- 232,242 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
  #ifdef AARCH64
    T rv;
    int tmp;
    __asm__ volatile(
< prev index next >