
src/hotspot/os_cpu/bsd_zero/atomic_bsd_zero.hpp

rev 49986 : 8202080: Introduce ordering semantics for Atomic::add and other RMW atomics
Reviewed-by: lucy, rehn, dholmes
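This change threads an explicit atomic_memory_order argument through the platform RMW primitives (add_and_fetch, xchg, cmpxchg), replacing the cmpxchg-only cmpxchg_memory_order. A minimal caller-side sketch of what this enables; the enum values (memory_order_relaxed, memory_order_conservative) and the defaulted order parameter are assumptions taken from the wider changeset, not from this file:

  volatile int _counter = 0;

  void bump() {
    // As before: omitting the order argument requests conservative
    // (full two-way fence) ordering.
    Atomic::add(1, &_counter);
    // New: a caller that needs only atomicity can relax the ordering.
    Atomic::add(1, &_counter, memory_order_relaxed);
  }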

*** 1,7 ****
  /*
! * Copyright (c) 2003, 2017, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
--- 1,7 ----
  /*
! * Copyright (c) 2003, 2018, Oracle and/or its affiliates. All rights reserved.
  * Copyright 2007, 2008, 2011, 2015, Red Hat, Inc.
  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  *
  * This code is free software; you can redistribute it and/or modify it
  * under the terms of the GNU General Public License version 2 only, as
*** 162,177 ****
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
    template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest) const;
  };
  
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
  
  #ifdef ARM
    return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
--- 162,178 ----
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::AddAndFetch<Atomic::PlatformAdd<byte_size> >
  {
    template<typename I, typename D>
!   D add_and_fetch(I add_value, D volatile* dest, atomic_memory_order order) const;
  };
  
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::add_and_fetch(I add_value, D volatile* dest,
!                                                atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
  
  #ifdef ARM
    return add_using_helper<int>(arm_add_and_fetch, add_value, dest);
*** 184,204 ****
  #endif // ARM
  }
  
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
  
    return __sync_add_and_fetch(dest, add_value);
  }
  
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(T));
  #ifdef ARM
    return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
  #else
  #ifdef M68K
--- 185,207 ----
  #endif // ARM
  }
  
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::add_and_fetch(I add_value, D volatile* dest,
!                                                atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
  
    return __sync_add_and_fetch(dest, add_value);
  }
  
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest,
!                                              atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
  #ifdef ARM
    return xchg_using_helper<int>(arm_lock_test_and_set, exchange_value, dest);
  #else
  #ifdef M68K
*** 220,230 ****
  }
  
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
!                                              T volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(T));
    T result = __sync_lock_test_and_set (dest, exchange_value);
    __sync_synchronize();
    return result;
  }
--- 223,234 ----
  }
  
  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
!                                              T volatile* dest,
!                                              atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    T result = __sync_lock_test_and_set (dest, exchange_value);
    __sync_synchronize();
    return result;
  }
*** 236,246 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 cmpxchg_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
  #ifdef ARM
    return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
  #else
  #ifdef M68K
--- 240,250 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 atomic_memory_order order) const {
    STATIC_ASSERT(4 == sizeof(T));
  #ifdef ARM
    return cmpxchg_using_helper<int>(arm_compare_and_swap, exchange_value, dest, compare_value);
  #else
  #ifdef M68K
*** 254,264 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 cmpxchg_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  }
  
  template<>
--- 258,268 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 atomic_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
  }
  
  template<>
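In this Zero port the new order parameter is accepted but, as the hunks show, not consulted: the __sync_* builtins used here act as full barriers regardless of the requested ordering, so every request is satisfied conservatively. A standalone sketch (not part of the patch) of the three builtins' semantics, and of why PlatformXchg alone needs a trailing __sync_synchronize():

  #include <cstdio>

  int main() {
    int v = 40;

    // __sync_add_and_fetch: full barrier; adds and returns the new value.
    int sum = __sync_add_and_fetch(&v, 2);             // sum == 42, v == 42

    // __sync_lock_test_and_set: only an acquire barrier per the GCC docs,
    // which is why the HotSpot code above pairs it with __sync_synchronize()
    // (a full barrier) to give xchg its conservative ordering.
    int old = __sync_lock_test_and_set(&v, 7);         // old == 42, v == 7
    __sync_synchronize();

    // __sync_val_compare_and_swap: full barrier; swaps only on a match,
    // returning the previous value either way.
    int prev = __sync_val_compare_and_swap(&v, 7, 9);  // prev == 7, v == 9

    printf("%d %d %d %d\n", sum, old, prev, v);        // prints: 42 42 7 9
    return 0;
  }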