/*
 * Copyright (c) 1999, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_RUNTIME_ATOMIC_HPP
#define SHARE_VM_RUNTIME_ATOMIC_HPP

#include "memory/allocation.hpp"
#include "metaprogramming/enableIf.hpp"
#include "metaprogramming/integerTypes.hpp"
#include "metaprogramming/isIntegral.hpp"
#include "metaprogramming/isSame.hpp"
#include "metaprogramming/removeCV.hpp"
#include "utilities/align.hpp"
#include "utilities/macros.hpp"

enum cmpxchg_memory_order {
  memory_order_relaxed,
  // Use value which doesn't interfere with C++2011. We need to be more conservative.
  memory_order_conservative = 8
};

class Atomic : AllStatic {
 public:
  // Atomic operations on jlong types are not available on all 32-bit
  // platforms. If atomic ops on jlongs are defined here they must only
  // be used from code that verifies they are available at runtime and
  // can provide an alternative action if not - see supports_cx8() for
  // a means to test availability.

  // The memory operations that are mentioned with each of the atomic
  // function families come from src/share/vm/runtime/orderAccess.hpp,
  // e.g., <fence> is described in that file and is implemented by the
  // OrderAccess::fence() function. See that file for the gory details
  // on the Memory Access Ordering Model.

  // All of the atomic operations that imply a read-modify-write action
  // guarantee a two-way memory barrier across that operation. Historically
  // these semantics reflect the strength of atomic operations that are
  // provided on SPARC/X86. We assume that strength is necessary unless
  // we can prove that a weaker form is sufficiently safe.
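
  // Illustrative sketch (not part of this header; the fields named here
  // are hypothetical): because every read-modify-write operation below
  // implies a two-way barrier, a publication pattern such as
  //
  //   _payload = compute();            // plain store
  //   Atomic::inc(&_published_count);  // RMW acts as a full barrier
  //
  // needs no additional OrderAccess::fence() between the two statements,
  // given the SPARC/X86-strength semantics assumed above.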

  // Atomically store to a location
  inline static void store    (jbyte    store_value, jbyte*    dest);
  inline static void store    (jshort   store_value, jshort*   dest);
  inline static void store    (jint     store_value, jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, jlong*    dest);
  inline static void store_ptr(intptr_t store_value, intptr_t* dest);
  inline static void store_ptr(void*    store_value, void*     dest);

  inline static void store    (jbyte    store_value, volatile jbyte*    dest);
  inline static void store    (jshort   store_value, volatile jshort*   dest);
  inline static void store    (jint     store_value, volatile jint*     dest);
  // See comment above about using jlong atomics on 32-bit platforms
  inline static void store    (jlong    store_value, volatile jlong*    dest);
  inline static void store_ptr(intptr_t store_value, volatile intptr_t* dest);
  inline static void store_ptr(void*    store_value, volatile void*     dest);

  // See comment above about using jlong atomics on 32-bit platforms
  inline static jlong load(const volatile jlong* src);

  // Atomically add to a location. Returns updated value. add*() provide:
  // <fence> add-value-to-dest <membar StoreLoad|StoreStore>
  inline static jshort   add    (jshort   add_value, volatile jshort*   dest);
  inline static jint     add    (jint     add_value, volatile jint*     dest);
  inline static size_t   add    (size_t   add_value, volatile size_t*   dest);
  inline static intptr_t add_ptr(intptr_t add_value, volatile intptr_t* dest);
  inline static void*    add_ptr(intptr_t add_value, volatile void*     dest);

  // Atomically increment location. inc*() provide:
  // <fence> increment-dest <membar StoreLoad|StoreStore>
  inline static void inc    (volatile jint*     dest);
  inline static void inc    (volatile jshort*   dest);
  inline static void inc    (volatile size_t*   dest);
  inline static void inc_ptr(volatile intptr_t* dest);
  inline static void inc_ptr(volatile void*     dest);

  // Atomically decrement a location. dec*() provide:
  // <fence> decrement-dest <membar StoreLoad|StoreStore>
  inline static void dec    (volatile jint*     dest);
  inline static void dec    (volatile jshort*   dest);
  inline static void dec    (volatile size_t*   dest);
  inline static void dec_ptr(volatile intptr_t* dest);
  inline static void dec_ptr(volatile void*     dest);

  // Performs atomic exchange of *dest with exchange_value. Returns the
  // prior value of *dest. xchg*() provide:
  // <fence> exchange-value-with-dest <membar StoreLoad|StoreStore>
  inline static jint         xchg    (jint         exchange_value, volatile jint*         dest);
  inline static unsigned int xchg    (unsigned int exchange_value, volatile unsigned int* dest);
  inline static intptr_t     xchg_ptr(intptr_t     exchange_value, volatile intptr_t*     dest);
  inline static void*        xchg_ptr(void*        exchange_value, volatile void*         dest);

  // Performs atomic compare of *dest and compare_value, and exchanges
  // *dest with exchange_value if the comparison succeeded. Returns prior
  // value of *dest. cmpxchg*() provide:
  // <fence> compare-and-exchange <membar StoreLoad|StoreStore>
  template<typename T, typename D, typename U>
  inline static D cmpxchg(T exchange_value,
                          D volatile* dest,
                          U compare_value,
                          cmpxchg_memory_order order = memory_order_conservative);

  // Performs atomic compare of *dest and NULL, and replaces *dest
  // with exchange_value if the comparison succeeded. Returns true if
  // the comparison succeeded and the exchange occurred. This is
  // often used as part of lazy initialization, as a lock-free
  // alternative to the Double-Checked Locking Pattern.
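  //
  // Illustrative sketch only (the type and field below are hypothetical,
  // not part of this header): lazily installing a singleton without a lock.
  //
  //   static Foo* volatile _instance;
  //   Foo* candidate = new Foo();
  //   if (!Atomic::replace_if_null(candidate, &_instance)) {
  //     delete candidate;   // another thread won the race; use _instance
  //   }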
  template<typename T, typename D>
  inline static bool replace_if_null(T* value, D* volatile* dest,
                                     cmpxchg_memory_order order = memory_order_conservative);

  inline static intptr_t cmpxchg_ptr(intptr_t exchange_value,
                                     volatile intptr_t* dest,
                                     intptr_t compare_value,
                                     cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value, dest, compare_value, order);
  }

  inline static void* cmpxchg_ptr(void* exchange_value,
                                  volatile void* dest,
                                  void* compare_value,
                                  cmpxchg_memory_order order = memory_order_conservative) {
    return cmpxchg(exchange_value,
                   reinterpret_cast<void* volatile*>(dest),
                   compare_value,
                   order);
  }

private:
  // Test whether From is implicitly convertible to To.
  // From and To must be pointer types.
  // Note: Provides the limited subset of C++11 std::is_convertible
  // that is needed here.
  template<typename From, typename To> struct IsPointerConvertible;

  // Dispatch handler for cmpxchg. Provides type-based validity
  // checking and limited conversions around calls to the
  // platform-specific implementation layer provided by
  // PlatformCmpxchg.
  template<typename T, typename D, typename U, typename Enable = void>
  struct CmpxchgImpl;

  // Platform-specific implementation of cmpxchg. Support for sizes
  // of 1, 4, and 8 is required. The class is a function object that
  // must be default constructable, with these requirements:
  //
  // - dest is of type T*.
  // - exchange_value and compare_value are of type T.
  // - order is of type cmpxchg_memory_order.
  // - platform_cmpxchg is an object of type PlatformCmpxchg<sizeof(T)>.
  //
  // Then
  //   platform_cmpxchg(exchange_value, dest, compare_value, order)
  // must be a valid expression, returning a result convertible to T.
  //
  // A default definition is provided, which declares a function template
  //   T operator()(T, T volatile*, T, cmpxchg_memory_order) const
  //
  // For each required size, a platform must either provide an
  // appropriate definition of that function, or must entirely
  // specialize the class template for that size.
  template<size_t byte_size> struct PlatformCmpxchg;

  // Support for platforms that implement some variants of cmpxchg
  // using a (typically out of line) non-template helper function.
  // The generic arguments passed to PlatformCmpxchg need to be
  // translated to the appropriate type for the helper function, the
  // helper invoked on the translated arguments, and the result
  // translated back. Type is the parameter / return type of the
  // helper function.
  template<typename Type, typename Fn, typename T>
  static T cmpxchg_using_helper(Fn fn,
                                T exchange_value,
                                T volatile* dest,
                                T compare_value);

  // Support platforms that do not provide Read-Modify-Write
  // byte-level atomic access. To use, derive PlatformCmpxchg<1> from
  // this class.
public: // Temporary, can't be private: C++03 11.4/2. Fixed by C++11.
  struct CmpxchgByteUsingInt;
private:
};

template<typename From, typename To>
struct Atomic::IsPointerConvertible<From*, To*> : AllStatic {
  // Determine whether From* is implicitly convertible to To*, using
  // the "sizeof trick".
  typedef char yes;
  typedef char (&no)[2];

  static yes test(To*);
  static no test(...);
  static From* test_value;

  static const bool value = (sizeof(yes) == sizeof(test(test_value)));
};

// Define the class before including platform file, which may specialize
// the operator definition. No generic definition of specializations
// of the operator template is provided, nor are there any generic
// specializations of the class. The platform file is responsible for
// providing those.
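//
// As an illustrative sketch only (not an actual platform port in this
// repository), a platform header could satisfy the 4-byte requirement by
// specializing the member function template, e.g. with the GCC builtin
// __sync_val_compare_and_swap:
//
//   template<>
//   template<typename T>
//   inline T Atomic::PlatformCmpxchg<4>::operator()(T exchange_value,
//                                                   T volatile* dest,
//                                                   T compare_value,
//                                                   cmpxchg_memory_order order) const {
//     STATIC_ASSERT(4 == sizeof(T));
//     return __sync_val_compare_and_swap(dest, compare_value, exchange_value);
//   }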
template<size_t byte_size>
struct Atomic::PlatformCmpxchg VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};

// Define the class before including platform file, which may use this
// as a base class, requiring it be complete. The definition is later
// in this file, near the other definitions related to cmpxchg.
struct Atomic::CmpxchgByteUsingInt VALUE_OBJ_CLASS_SPEC {
  template<typename T>
  T operator()(T exchange_value,
               T volatile* dest,
               T compare_value,
               cmpxchg_memory_order order) const;
};

// platform specific in-line definitions - must come before shared definitions

#include OS_CPU_HEADER(atomic)

// shared in-line definitions

// size_t casts...
#if (SIZE_MAX != UINTPTR_MAX)
#error size_t is not WORD_SIZE, interesting platform, but missing implementation here
#endif

inline size_t Atomic::add(size_t add_value, volatile size_t* dest) {
  return (size_t) add_ptr((intptr_t) add_value, (volatile intptr_t*) dest);
}

inline void Atomic::inc(volatile size_t* dest) {
  inc_ptr((volatile intptr_t*) dest);
}

inline void Atomic::dec(volatile size_t* dest) {
  dec_ptr((volatile intptr_t*) dest);
}

template<typename T, typename D, typename U>
inline D Atomic::cmpxchg(T exchange_value,
                         D volatile* dest,
                         U compare_value,
                         cmpxchg_memory_order order) {
  return CmpxchgImpl<T, D, U>()(exchange_value, dest, compare_value, order);
}

template<typename T, typename D>
inline bool Atomic::replace_if_null(T* value, D* volatile* dest,
                                    cmpxchg_memory_order order) {
  // Presently using a trivial implementation in terms of cmpxchg.
  // Consider adding platform support, to permit the use of compiler
  // intrinsics like gcc's __sync_bool_compare_and_swap.
  D* expected_null = NULL;
  return expected_null == cmpxchg(value, dest, expected_null, order);
}

// Handle cmpxchg for integral and enum types.
//
// All the involved types must be identical.
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IsIntegral<T>::value || IsRegisteredEnum<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
    // Forward to the platform handler for the size of T.
    return PlatformCmpxchg<sizeof(T)>()(exchange_value, dest, compare_value, order);
  }
};

// Handle cmpxchg for pointer types.
//
// The destination's type and the compare_value type must be the same,
// ignoring cv-qualifiers; we don't care about the cv-qualifiers of
// the compare_value.
//
// The exchange_value must be implicitly convertible to the
// destination's type; it must be type-correct to store the
// exchange_value in the destination.
template<typename T, typename D, typename U>
struct Atomic::CmpxchgImpl<
  T*, D*, U*,
  typename EnableIf<Atomic::IsPointerConvertible<T*, D*>::value &&
                    IsSame<typename RemoveCV<D>::type,
                           typename RemoveCV<U>::type>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  D* operator()(T* exchange_value, D* volatile* dest, U* compare_value,
                cmpxchg_memory_order order) const {
    // Allow derived to base conversion, and adding cv-qualifiers.
    D* new_value = exchange_value;
    // Don't care what the CV qualifiers for compare_value are,
    // but we need to match D* when calling platform support.
    D* old_value = const_cast<D*>(compare_value);
    return PlatformCmpxchg<sizeof(D*)>()(new_value, dest, old_value, order);
  }
};

// Handle cmpxchg for types that have a translator.
//
// All the involved types must be identical.
//
// This translates the original call into a call on the decayed
// arguments, and returns the recovered result of that translated
// call.
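//
// For illustration only (the wrapper type and its translator below are
// hypothetical, not defined anywhere in HotSpot; the TrueType base, which
// supplies ::value, is an assumption): a single-field value type would opt
// in roughly like this,
//
//   class TaggedWord {
//     intptr_t _bits;
//    public:
//     explicit TaggedWord(intptr_t bits) : _bits(bits) {}
//     intptr_t bits() const { return _bits; }
//   };
//
//   template<>
//   struct IntegerTypes::Translate<TaggedWord> : public TrueType {
//     typedef intptr_t Decayed;
//     static Decayed    decay(TaggedWord x)   { return x.bits(); }
//     static TaggedWord recover(Decayed bits) { return TaggedWord(bits); }
//   };
//
// after which Atomic::cmpxchg on a TaggedWord location is forwarded to the
// intptr_t implementation by the specialization below.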
template<typename T>
struct Atomic::CmpxchgImpl<
  T, T, T,
  typename EnableIf<IntegerTypes::Translate<T>::value>::type>
  VALUE_OBJ_CLASS_SPEC
{
  T operator()(T exchange_value, T volatile* dest, T compare_value,
               cmpxchg_memory_order order) const {
    typedef IntegerTypes::Translate<T> Translator;
    typedef typename Translator::Decayed Decayed;
    STATIC_ASSERT(sizeof(T) == sizeof(Decayed));
    return Translator::recover(
      cmpxchg(Translator::decay(exchange_value),
              reinterpret_cast<Decayed volatile*>(dest),
              Translator::decay(compare_value),
              order));
  }
};

template<typename Type, typename Fn, typename T>
inline T Atomic::cmpxchg_using_helper(Fn fn,
                                      T exchange_value,
                                      T volatile* dest,
                                      T compare_value) {
  STATIC_ASSERT(sizeof(Type) == sizeof(T));
  return IntegerTypes::cast<T>(
    fn(IntegerTypes::cast<Type>(exchange_value),
       reinterpret_cast<Type volatile*>(dest),
       IntegerTypes::cast<Type>(compare_value)));
}

template<typename T>
inline T Atomic::CmpxchgByteUsingInt::operator()(T exchange_value,
                                                 T volatile* dest,
                                                 T compare_value,
                                                 cmpxchg_memory_order order) const {
  STATIC_ASSERT(sizeof(T) == sizeof(uint8_t));
  uint8_t canon_exchange_value = exchange_value;
  uint8_t canon_compare_value = compare_value;
  volatile uint32_t* aligned_dest
    = reinterpret_cast<volatile uint32_t*>(align_down(dest, sizeof(uint32_t)));
  size_t offset = pointer_delta(dest, aligned_dest, 1);
  uint32_t cur = *aligned_dest;
  uint8_t* cur_as_bytes = reinterpret_cast<uint8_t*>(&cur);

  // current value may not be what we are looking for, so force it
  // to that value so the initial cmpxchg will fail if it is different
  cur_as_bytes[offset] = canon_compare_value;

  // always execute a real cmpxchg so that we get the required memory
  // barriers even on initial failure
  do {
    // value to swap in matches current value ...
    uint32_t new_value = cur;
    // ... except for the one jbyte we want to update
    reinterpret_cast<uint8_t*>(&new_value)[offset] = canon_exchange_value;

    uint32_t res = cmpxchg(new_value, aligned_dest, cur, order);
    if (res == cur) break;      // success

    // at least one byte in the int changed value, so update
    // our view of the current int
    cur = res;
    // if our byte is still as cur we loop and try again
  } while (cur_as_bytes[offset] == canon_compare_value);

  return IntegerTypes::cast<T>(cur_as_bytes[offset]);
}

inline unsigned Atomic::xchg(unsigned int exchange_value, volatile unsigned int* dest) {
  assert(sizeof(unsigned int) == sizeof(jint), "more work to do");
  return (unsigned int)Atomic::xchg((jint)exchange_value, (volatile jint*)dest);
}

inline jshort Atomic::add(jshort add_value, volatile jshort* dest) {
  // Most platforms do not support atomic add on a 2-byte value. However,
  // if the value occupies the most significant 16 bits of an aligned 32-bit
  // word, then we can do this with an atomic add of (add_value << 16)
  // to the 32-bit word.
  //
  // The least significant parts of this 32-bit word will never be affected, even
  // in case of overflow/underflow.
  //
  // Use the ATOMIC_SHORT_PAIR macro (see macros.hpp) to get the desired alignment.
#ifdef VM_LITTLE_ENDIAN
  assert((intx(dest) & 0x03) == 0x02, "wrong alignment");
  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest-1));
#else
  assert((intx(dest) & 0x03) == 0x00, "wrong alignment");
  jint new_value = Atomic::add(add_value << 16, (volatile jint*)(dest));
#endif
  return (jshort)(new_value >> 16); // preserves sign
}

inline void Atomic::inc(volatile jshort* dest) {
  (void)add(1, dest);
}

inline void Atomic::dec(volatile jshort* dest) {
  (void)add(-1, dest);
}

#endif // SHARE_VM_RUNTIME_ATOMIC_HPP