/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/integerTypes.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isPointer.hpp"
#include "utilities/align.hpp"
#include "utilities/debug.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use value which doesn't interfere with C++2011. We need to be more conservative.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
  template<typename T> class Never: public FalseType {};

  template <typename T>
  inline static void specialized_store(T store_value, volatile T* dest) {
    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
    (void)const_cast<T&>(*dest = store_value);
  }

  template <typename T>
  inline static T specialized_load(const volatile T* dest) {
    STATIC_ASSERT(sizeof(T) <= size_t(BytesPerWord)); // Does the machine support atomic wide accesses?
    return *dest;
  }

  template <typename T>
  inline static T specialized_add(T add_value, volatile T* dest) {
    STATIC_ASSERT(Never<T>::value);
    return add_value;
  }

  template <typename T>
  inline static void specialized_inc(volatile T* dest) {
    add(1, dest);
  }

  template <typename T>
  inline static void specialized_dec(volatile T* dest) {
    add(-1, dest);
  }

  template <typename T>
  inline static T specialized_xchg(T exchange_value, volatile T* dest) {
    STATIC_ASSERT(Never<T>::value);
    return exchange_value;
  }

  template <typename T>
  inline static T specialized_cmpxchg(T exchange_value, volatile T* dest, T compare_value, cmpxchg_memory_order order) {
    STATIC_ASSERT(Never<T>::value);
    return exchange_value;
  }

 public:
  // Atomic operations on 64-bit types are not available on all 32-bit
  // platforms. If atomic ops on 64-bit types are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.
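
  // Illustrative sketch only (not a member of this class): code that needs
  // 64-bit atomics on a platform that might lack them typically checks
  // availability at runtime and falls back to a lock. The helper below is
  // hypothetical; it assumes the usual VM_Version::supports_cx8() query and
  // a caller-supplied Mutex guarding the fallback path.
  //
  //   jlong add_to_counter(volatile jlong* addr, jlong delta, Mutex* lock) {
  //     if (VM_Version::supports_cx8()) {   // 64-bit cmpxchg is available
  //       jlong cur = *addr;                // a stale or torn read only costs a retry
  //       jlong prev;
  //       while ((prev = Atomic::cmpxchg(cur + delta, addr, cur)) != cur) {
  //         cur = prev;                     // retry with the value cmpxchg observed
  //       }
  //       return cur + delta;
  //     } else {
  //       MutexLocker ml(lock);             // lock-based alternative action
  //       *addr += delta;
  //       return *addr;
  //     }
  //   }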

  // Atomically store to a location
  // See comment above about using 64-bit atomics on 32-bit platforms
  template <typename T, typename U>
  inline static void store(T store_value, volatile U* dest);

  // The store_ptr() member functions are deprecated. Use store() instead.
  static void store_ptr(intptr_t store_value, volatile intptr_t* dest) {
    store(store_value, dest);
  }

  static void store_ptr(void* store_value, volatile void* dest) {
    store((intptr_t)store_value, (volatile intptr_t*)dest);
  }

  // Atomically load from a location
  // See comment above about using 64-bit atomics on 32-bit platforms
  template <typename T>
  inline static T load(volatile T* src);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  // add(I1 v, I* d)
  // add(I1 v, P* d)
  // where I, I1 are integral types, P is a pointer type.
  // Functional behavior is modelled on *dest += add_value.
  template <typename T, typename U>
  inline static U add(T add_value, volatile U* dst);

  template <typename T, typename U>
  inline static U* add(T add_value, U* volatile* dst);

  // The add_ptr() member functions are deprecated. Use add() instead.
  static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest) {
    return add(add_value, dest);
  }

  static void* add_ptr(intptr_t add_value, volatile void* dest) {
    return (void*)add(add_value, (volatile intptr_t*)dest);
  }

  // Atomically increment a location. inc*() provide:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  // Functional behavior is modelled on *dest++
  template <typename T>
  inline static void inc(volatile T* dest);

  template <typename T>
  inline static void inc(T* volatile* dest);

  // The inc_ptr member functions are deprecated. Use inc() instead.
  static void inc_ptr(volatile intptr_t* dest) {
    inc(dest);
  }

  static void inc_ptr(volatile void* dest) {
    inc((volatile intptr_t*)dest);
  }

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  // Functional behavior is modelled on *dest--
  template <typename T>
  inline static void dec(volatile T* dest);

  template <typename T>
  inline static void dec(T* volatile* dest);

  // The dec_ptr member functions are deprecated. Use dec() instead.
  static void dec_ptr(volatile intptr_t* dest) {
    dec(dest);
  }

  static void dec_ptr(volatile void* dest) {
    dec((volatile intptr_t*)dest);
  }

  // Performs atomic exchange of *dest with exchange_value. Returns prior
  // value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  template <typename T, typename U>
  inline static U xchg(T exchange_value, volatile U* dest);

  // The xchg_ptr() member functions are deprecated. Use xchg() instead.
  static intptr_t xchg_ptr(intptr_t exchange_value, volatile intptr_t* dest) {
    return xchg(exchange_value, dest);
  }

  static void* xchg_ptr(void* exchange_value, volatile void* dest) {
    return (void*)xchg((intptr_t)exchange_value, (volatile intptr_t*)dest);
  }

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
  // See comment above about using 64-bit atomics on 32-bit platforms
  template <typename T, typename U, typename V>
  inline static U cmpxchg(T exchange_value, volatile U* dest, V compare_value,
                          cmpxchg_memory_order order = memory_order_conservative);
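
  // Illustrative sketch of a typical cmpxchg() retry loop (hypothetical
  // function and variable names, shown for a 32-bit value):
  //
  //   // Raises *max_addr to candidate if candidate is larger.
  //   void update_max(volatile int32_t* max_addr, int32_t candidate) {
  //     int32_t cur = Atomic::load(max_addr);
  //     while (candidate > cur) {
  //       int32_t prev = Atomic::cmpxchg(candidate, max_addr, cur);
  //       if (prev == cur) break;   // candidate was installed
  //       cur = prev;               // lost a race; re-check against the new value
  //     }
  //   }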

  // The cmpxchg_ptr member functions are deprecated. Use cmpxchg() instead.
  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value, volatile intptr_t* dest,
                                     intptr_t compare_value,
                                     cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value, dest, compare_value, order);
  }

  inline static void* cmpxchg_ptr(void* exchange_value, volatile void* dest, void* compare_value,
                                  cmpxchg_memory_order order = memory_order_conservative) {
    return (void*)cmpxchg((intptr_t)exchange_value, (volatile intptr_t*)dest,
                          (intptr_t)compare_value, order);
  }
};

// internal implementation

template <typename T, typename U>
inline void Atomic::store(T store_value, volatile U* dest) {
  typedef typename IntegerTypes::Signed<U>::type Raw;
  U store_value_cast = store_value;
  specialized_store(IntegerTypes::cast_to_signed(store_value_cast), reinterpret_cast<volatile Raw*>(dest));
}

template <typename T>
inline T Atomic::load(volatile T* src) {
  typedef typename IntegerTypes::Signed<T>::type Raw;
  return IntegerTypes::cast<T>(specialized_load(reinterpret_cast<const volatile Raw*>(src)));
}

template <typename T, typename U>
inline U Atomic::add(T add_value, volatile U* dst) {
  STATIC_ASSERT(IsIntegral<T>::value);
  STATIC_ASSERT(IsIntegral<U>::value);
  typedef typename IntegerTypes::Signed<U>::type Raw;
  // Allow -Wconversion or the like to complain about unsafe conversions.
  U value = add_value;
  Raw raw_value = IntegerTypes::cast_to_signed(value);
  Raw result = specialized_add(raw_value, reinterpret_cast<volatile Raw*>(dst));
  return IntegerTypes::cast<U>(result);
}

template <typename T, typename U>
inline U* Atomic::add(T add_value, U* volatile* dst) {
  STATIC_ASSERT(IsIntegral<T>::value);
  typedef typename IntegerTypes::Signed<U*>::type Raw;
  ptrdiff_t value = add_value;
  Raw raw_value = IntegerTypes::cast_to_signed(value * sizeof(U));
  Raw result = specialized_add(raw_value, reinterpret_cast<volatile Raw*>(dst));
  return IntegerTypes::cast<U*>(result);
}

template <typename T>
inline void Atomic::inc(volatile T* src) {
  STATIC_ASSERT(IsIntegral<T>::value);
  typedef typename IntegerTypes::Signed<T>::type Raw;
  specialized_inc(reinterpret_cast<volatile Raw*>(src));
}

template <typename T>
inline void Atomic::inc(T* volatile* src) {
  if (sizeof(T) != 1) {
    add(1, src);
  } else {
    typedef typename IntegerTypes::Signed<T*>::type Raw;
    specialized_inc(reinterpret_cast<volatile Raw*>(src));
  }
}

template <typename T>
inline void Atomic::dec(volatile T* src) {
  STATIC_ASSERT(IsIntegral<T>::value);
  typedef typename IntegerTypes::Signed<T>::type Raw;
  specialized_dec(reinterpret_cast<volatile Raw*>(src));
}

template <typename T>
inline void Atomic::dec(T* volatile* src) {
  if (sizeof(T) != 1) {
    add(-1, src);
  } else {
    typedef typename IntegerTypes::Signed<T*>::type Raw;
    specialized_dec(reinterpret_cast<volatile Raw*>(src));
  }
}

template <typename T, typename U>
inline U Atomic::xchg(T exchange_value, volatile U* dest) {
  typedef typename IntegerTypes::Signed<U>::type Raw;
  U exchange_value_cast = exchange_value;
  Raw result = specialized_xchg(IntegerTypes::cast_to_signed(exchange_value_cast),
                                reinterpret_cast<volatile Raw*>(dest));
  return IntegerTypes::cast<U>(result);
}

template <typename T, typename U, typename V>
inline U Atomic::cmpxchg(T exchange_value, volatile U* dest, V compare_value, cmpxchg_memory_order order) {
  typedef typename IntegerTypes::Signed<U>::type Raw;
  U exchange_value_cast = exchange_value;
  U compare_value_cast = compare_value;
  Raw result = specialized_cmpxchg(IntegerTypes::cast_to_signed(exchange_value_cast),
                                   reinterpret_cast<volatile Raw*>(dest),
                                   IntegerTypes::cast_to_signed(compare_value_cast), order);
  return IntegerTypes::cast<U>(result);
}

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)
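
// Illustrative sketch of the kind of specialization an OS_CPU header is
// expected to supply (shape only; real platform files differ, and many use
// inline assembly rather than the GCC __sync builtin assumed here):
//
//   template <>
//   inline int32_t Atomic::specialized_cmpxchg<int32_t>(int32_t exchange_value,
//                                                       volatile int32_t* dest,
//                                                       int32_t compare_value,
//                                                       cmpxchg_memory_order order) {
//     // Full barrier semantics, matching the conservative default order.
//     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
//   }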

// shared in-line definitions

#ifndef VM_HAS_SPECIALIZED_CMPXCHG_BYTE
/*
 * This is the default implementation of byte-sized cmpxchg. It emulates 8-bit-sized cmpxchg
 * in terms of 32-bit-sized cmpxchg. Platforms may override this by defining their own inline
 * definition as well as defining VM_HAS_SPECIALIZED_CMPXCHG_BYTE. This will cause the platform
 * specific implementation to be used instead.
 */
template <>
inline int8_t Atomic::specialized_cmpxchg<int8_t>(int8_t exchange_value, volatile int8_t* dest,
                                                  int8_t compare_value, cmpxchg_memory_order order) {
  volatile int32_t* dest_int =
      reinterpret_cast<volatile int32_t*>(align_down(dest, sizeof(int32_t)));
  size_t offset = pointer_delta(dest, dest_int, 1);
  int32_t cur = *dest_int;
  int8_t* cur_as_bytes = reinterpret_cast<int8_t*>(&cur);

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  cur_as_bytes[offset] = compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    int32_t new_value = cur;
    // ... except for the one byte we want to update
    reinterpret_cast<int8_t*>(&new_value)[offset] = exchange_value;

    int32_t res = cmpxchg(new_value, dest_int, cur, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte is still as cur we loop and try again
  } while (cur_as_bytes[offset] == compare_value);

  return cur_as_bytes[offset];
}
#endif // VM_HAS_SPECIALIZED_CMPXCHG_BYTE

template <>
inline int16_t Atomic::specialized_add<int16_t>(int16_t add_value, volatile int16_t* dest) {
  // Most platforms do not support atomic add on a 2-byte value. However,
  // if the value occupies the most significant 16 bits of an aligned 32-bit
  // word, then we can do this with an atomic add of (add_value << 16)
  // to the 32-bit word.
  //
  // The least significant parts of this 32-bit word will never be affected, even
  // in case of overflow/underflow.
  //
  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  int32_t new_value = Atomic::add(int32_t(add_value) << 16, (volatile int32_t*)(dest));
#endif
  return (int16_t)(new_value >> 16); // preserves sign
}

template <>
inline void Atomic::specialized_inc<int16_t>(volatile int16_t* dest) {
  (void)add(int16_t(1), dest);
}

template <>
inline void Atomic::specialized_dec<int16_t>(volatile int16_t* dest) {
  (void)add(int16_t(-1), dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP
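
// Illustrative sketch of the ATOMIC_SHORT_PAIR layout referred to above
// (hypothetical class and field names; see macros.hpp for the real macro):
//
//   class RefCounted {
//     ATOMIC_SHORT_PAIR(
//       volatile short _refcount,   // updated via Atomic::add/inc/dec
//       unsigned short _length      // plain field paired with it so that
//     );                            // _refcount occupies the most significant
//   };                              // 16 bits of an aligned 32-bit word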