
src/hotspot/os_cpu/bsd_x86/atomic_bsd_x86.hpp

rev 49986 : 8202080: Introduce ordering semantics for Atomic::add and other RMW atomics
Reviewed-by: lucy, rehn, dholmes
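
For context: this changeset threads a single atomic_memory_order argument through every platform RMW primitive (fetch_and_add, xchg, cmpxchg), replacing the cmpxchg-only cmpxchg_memory_order enum. The enum itself lives in share/runtime/atomic.hpp; as a sketch of its shape in this revision (reconstructed, not reproduced verbatim from the patch):

    enum atomic_memory_order {
      // Modes intended to follow C++11 std::memory_order semantics.
      memory_order_relaxed,
      memory_order_acquire,
      memory_order_release,
      memory_order_acq_rel,
      // Strong two-way memory barrier; the default, preserving HotSpot's
      // historical behavior for RMW atomics.
      memory_order_conservative
    };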

*** 1,7 ****
  /*
!  * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
--- 1,7 ----
  /*
!  * Copyright (c) 1999, 2018, Oracle and/or its affiliates. All rights reserved.
   * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   *
   * This code is free software; you can redistribute it and/or modify it
   * under the terms of the GNU General Public License version 2 only, as
   * published by the Free Software Foundation.
*** 30,45 ****
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
  {
    template<typename I, typename D>
!   D fetch_and_add(I add_value, D volatile* dest) const;
  };

  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
    D old_value;
    __asm__ volatile (  "lock xaddl %0,(%2)"
                      : "=r" (old_value)
--- 30,46 ----
  template<size_t byte_size>
  struct Atomic::PlatformAdd
    : Atomic::FetchAndAdd<Atomic::PlatformAdd<byte_size> >
  {
    template<typename I, typename D>
!   D fetch_and_add(I add_value, D volatile* dest, atomic_memory_order /* order */) const;
  };

  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<4>::fetch_and_add(I add_value, D volatile* dest,
!                                                atomic_memory_order /* order */) const {
    STATIC_ASSERT(4 == sizeof(I));
    STATIC_ASSERT(4 == sizeof(D));
    D old_value;
    __asm__ volatile (  "lock xaddl %0,(%2)"
                      : "=r" (old_value)
--- 30,46 ----
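
On x86, lock-prefixed RMW instructions such as lock xadd are full two-way barriers, which is why these BSD/x86 implementations can accept the new order argument and simply ignore it: even memory_order_relaxed callers get conservative ordering for free. A minimal standalone sketch of the same idea (function and parameter names are illustrative, not taken from the patch):

    #include <cstdint>

    // Illustrative 32-bit fetch-and-add in the style of PlatformAdd<4> above.
    // An ordering parameter would be accepted and ignored here, because
    // "lock xaddl" already acts as a full barrier on x86.
    inline int32_t fetch_and_add_32(int32_t add_value, volatile int32_t* dest) {
      int32_t old_value;
      __asm__ volatile ("lock xaddl %0,(%2)"
                        : "=r" (old_value)
                        : "0" (add_value), "r" (dest)
                        : "cc", "memory");
      return old_value;  // value of *dest before the add
    }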
*** 49,59 ****
  }

  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest) const {
    STATIC_ASSERT(4 == sizeof(T));
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (exchange_value)
                      : "0" (exchange_value), "r" (dest)
                      : "memory");
--- 50,61 ----
  }

  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<4>::operator()(T exchange_value,
!                                              T volatile* dest,
!                                              atomic_memory_order /* order */) const {
    STATIC_ASSERT(4 == sizeof(T));
    __asm__ volatile (  "xchgl (%2),%0"
                      : "=r" (exchange_value)
                      : "0" (exchange_value), "r" (dest)
                      : "memory");
*** 63,73 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 cmpxchg_memory_order /* order */) const {
    STATIC_ASSERT(1 == sizeof(T));
    __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                      : "=a" (exchange_value)
                      : "q" (exchange_value), "a" (compare_value), "r" (dest)
                      : "cc", "memory");
--- 65,75 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<1>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 atomic_memory_order /* order */) const {
    STATIC_ASSERT(1 == sizeof(T));
    __asm__ volatile (  "lock cmpxchgb %1,(%3)"
                      : "=a" (exchange_value)
                      : "q" (exchange_value), "a" (compare_value), "r" (dest)
                      : "cc", "memory");
*** 77,87 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 cmpxchg_memory_order /* order */) const {
    STATIC_ASSERT(4 == sizeof(T));
    __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                      : "=a" (exchange_value)
                      : "r" (exchange_value), "a" (compare_value), "r" (dest)
                      : "cc", "memory");
--- 79,89 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 atomic_memory_order /* order */) const {
    STATIC_ASSERT(4 == sizeof(T));
    __asm__ volatile (  "lock cmpxchgl %1,(%3)"
                      : "=a" (exchange_value)
                      : "r" (exchange_value), "a" (compare_value), "r" (dest)
                      : "cc", "memory");
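
The cmpxchg hunks are pure renames: cmpxchg_memory_order becomes atomic_memory_order, and the lock cmpxchg sequences themselves are untouched. For readers who think in C++11 atomics, the primitive corresponds roughly to a strong compare-exchange that returns the value observed at dest (an analogy, not code from the patch):

    #include <atomic>
    #include <cstdint>

    // Rough C++11 analogue of PlatformCmpxchg<4>: returns the value found at
    // *dest, which equals compare_value exactly when the swap succeeded.
    inline int32_t cmpxchg_analogue(int32_t exchange_value,
                                    std::atomic<int32_t>* dest,
                                    int32_t compare_value) {
      int32_t expected = compare_value;
      dest->compare_exchange_strong(expected, exchange_value,
                                    std::memory_order_seq_cst);
      return expected;  // old value, matching the HotSpot convention
    }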
*** 89,99 ****
  }

  #ifdef AMD64
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
    D old_value;
    __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                          : "=r" (old_value)
--- 91,102 ----
  }

  #ifdef AMD64
  template<>
  template<typename I, typename D>
! inline D Atomic::PlatformAdd<8>::fetch_and_add(I add_value, D volatile* dest,
!                                                atomic_memory_order /* order */) const {
    STATIC_ASSERT(8 == sizeof(I));
    STATIC_ASSERT(8 == sizeof(D));
    D old_value;
    __asm__ __volatile__ (  "lock xaddq %0,(%2)"
                          : "=r" (old_value)
*** 103,113 ****
  }

  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
!                                              T volatile* dest) const {
    STATIC_ASSERT(8 == sizeof(T));
    __asm__ __volatile__ ("xchgq (%2),%0"
                          : "=r" (exchange_value)
                          : "0" (exchange_value), "r" (dest)
                          : "memory");
--- 106,117 ----
  }

  template<>
  template<typename T>
  inline T Atomic::PlatformXchg<8>::operator()(T exchange_value,
!                                              T volatile* dest,
!                                              atomic_memory_order /* order */) const {
    STATIC_ASSERT(8 == sizeof(T));
    __asm__ __volatile__ ("xchgq (%2),%0"
                          : "=r" (exchange_value)
                          : "0" (exchange_value), "r" (dest)
                          : "memory");
*** 117,127 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 cmpxchg_memory_order /* order */) const {
    STATIC_ASSERT(8 == sizeof(T));
    __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                          : "=a" (exchange_value)
                          : "r" (exchange_value), "a" (compare_value), "r" (dest)
                          : "cc", "memory");
--- 121,131 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 atomic_memory_order /* order */) const {
    STATIC_ASSERT(8 == sizeof(T));
    __asm__ __volatile__ (  "lock cmpxchgq %1,(%3)"
                          : "=a" (exchange_value)
                          : "r" (exchange_value), "a" (compare_value), "r" (dest)
                          : "cc", "memory");
*** 139,149 ****
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 cmpxchg_memory_order order) const {
    STATIC_ASSERT(8 == sizeof(T));
    return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
  }

  template<>
--- 143,153 ----
  template<>
  template<typename T>
  inline T Atomic::PlatformCmpxchg<8>::operator()(T exchange_value,
                                                  T volatile* dest,
                                                  T compare_value,
!                                                 atomic_memory_order /* order */) const {
    STATIC_ASSERT(8 == sizeof(T));
    return cmpxchg_using_helper<int64_t>(_Atomic_cmpxchg_long, exchange_value, dest, compare_value);
  }

  template<>
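
At call sites the new argument is optional; in this revision the public Atomic API defaults to the conservative mode, so existing callers keep full-fence semantics while hot paths can opt into weaker ordering. A hypothetical caller (the counter variable and function are illustrative, not from the patch):

    #include "runtime/atomic.hpp"

    static volatile int32_t _counter = 0;

    void bump() {
      Atomic::add(1, &_counter);                        // defaults to memory_order_conservative
      Atomic::add(1, &_counter, memory_order_relaxed);  // explicitly relaxed ordering
    }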