/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/collectedHeap.hpp"
#include "oops/access.inline.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/mutexLocker.hpp"
#include "runtime/vm_version.hpp"
#include "utilities/copy.hpp"

// VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
//
// On platforms which do not support atomic compare-and-swap of jlong (8 byte)
// values we have to use a lock-based scheme to enforce atomicity. This has to be
// applied to all Unsafe operations that set the value of a jlong field. Even so
// the compareAndSwapLong operation will not be atomic with respect to direct stores
// to the field from Java code. It is important therefore that any Java code that
// utilizes these Unsafe jlong operations does not perform direct stores. To permit
// direct loads of the field from Java code we must also use Atomic::store within the
// locked regions. And for good measure, in case there are direct stores, we also
// employ Atomic::load within those regions. Note that the field in question must be
// volatile and so must have atomic load/store accesses applied at the Java level.
//
// The locking scheme could utilize a range of strategies for controlling the locking
// granularity: from a lock per-field through to a single global lock. The latter is
// the simplest and is used for the current implementation. Note that the Java object
// that contains the field can not, in general, be used for locking. To do so can lead
// to deadlocks as we may introduce locking into what appears to the Java code to be a
// lock-free path.
//
// As all the locked regions are very short and themselves non-blocking, we can treat
// them as leaf routines and elide safepoint checks (i.e. we don't perform any thread
// state transitions, even when blocking for the lock). Note that if we do choose to
// add safepoint checks and thread state transitions, we must ensure that we calculate
// the address of the field _after_ we have acquired the lock, else the object may have
// been moved by the GC.

#ifndef SUPPORTS_NATIVE_CX8

// This is intentionally in the cpp file rather than the .inline.hpp file. It seems
// desirable to trade faster JDK build times (not propagating vm_version.hpp) for
// slightly worse runtime atomic jlong performance on 32-bit machines with runtime
// support for 64-bit atomics.
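
// For reference, a caller's dispatch between the native and locked wide-atomic
// paths is expected to look along these lines (an illustrative sketch, not code
// from this file; the real dispatch lives in access.inline.hpp):
//
//   if (AccessInternal::wide_atomic_needs_locking()) {
//     AccessInternal::store_locked(addr, value);          // global-lock slow path
//   } else {
//     RawAccess<MO_RELAXED>::store((jlong*)addr, value);  // native 64-bit atomic store
//   }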
bool AccessInternal::wide_atomic_needs_locking() {
  // The lock-based scheme is needed only when the platform lacks native
  // 8-byte atomics.
  return !VM_Version::supports_cx8();
}

jlong AccessInternal::load_locked(void* addr) {
  assert(!VM_Version::supports_cx8(), "why else?");
  MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
  // MO_RELAXED gives an atomic load, so direct loads of the field from Java
  // code (see the comment above) observe a consistent value.
  return RawAccess<MO_RELAXED>::load((jlong*)addr);
}

void AccessInternal::store_locked(void* addr, jlong val) {
  assert(!VM_Version::supports_cx8(), "why else?");
  MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
  RawAccess<MO_RELAXED>::store((jlong*)addr, val);
}

jlong AccessInternal::swap_locked(jlong new_val, void* addr) {
  assert(!VM_Version::supports_cx8(), "why else?");
  MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
  jlong old_val = RawAccess<MO_RELAXED>::load((jlong*)addr);
  RawAccess<MO_RELAXED>::store((jlong*)addr, new_val);
  return old_val;
}

jlong AccessInternal::cas_locked(jlong new_val, void* addr, jlong expected_val) {
  assert(!VM_Version::supports_cx8(), "why else?");
  MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
  jlong old_val = RawAccess<MO_RELAXED>::load((jlong*)addr);
  if (old_val == expected_val) {
    RawAccess<MO_RELAXED>::store((jlong*)addr, new_val);
  }
  return old_val;
}

#endif // SUPPORTS_NATIVE_CX8

void AccessInternal::copy_arrayof_conjoint_oops(void* src, void* dst, size_t length) {
  Copy::arrayof_conjoint_oops((HeapWord*)src, (HeapWord*)dst, length);
}

void AccessInternal::copy_conjoint_oops(void* src, void* dst, size_t length) {
  // Unlike the arrayof variant above, the elements here need not be
  // HeapWord-aligned, so use the element-wise atomic copy.
  Copy::conjoint_oops_atomic((oop*)src, (oop*)dst, length);
}

void AccessInternal::copy_conjoint_memory_atomic(void* src, void* dst, size_t length) {
  Copy::conjoint_memory_atomic(src, dst, length);
}

void AccessInternal::copy_conjoint_jbytes(void* src, void* dst, size_t length) {
  Copy::conjoint_jbytes((jbyte*)src, (jbyte*)dst, length);
}

void AccessInternal::copy_conjoint_jlongs_atomic(void* src, void* dst, size_t length) {
  Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, length);
}

void AccessInternal::copy_disjoint_words(void* src, void* dst, size_t length) {
  Copy::disjoint_words((HeapWord*)src, (HeapWord*)dst, length);
}

// Add common accesses here that can be made without access.inline.hpp, for
// faster build times, when inlining of the access is not crucial because the
// access is not hot enough to be noticeable. In such cases it is desirable
// to save build times instead.
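
// As an illustrative example of what these pre-instantiated accesses buy us
// (the call below is a sketch assuming the load_at declaration in access.hpp,
// not code from this file): a caller that includes only access.hpp can still
// perform
//
//   jlong v = AccessInternal::load_at<ACCESS_ON_HEAP, jlong>(obj, offset);
//
// without pulling in access.inline.hpp, because the explicit instantiations
// below provide the out-of-line definition.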
// Explicit instantiations of the out-of-line accesses. The template arguments
// are spelled out because the DecoratorSet can never be deduced from the call
// arguments (and for load_at, neither can the result type T). Threading the
// DECORATORS macro parameter through the argument lists is also what makes the
// three invocations below generate distinct instantiations rather than
// duplicates.
#define GENERATE_ACCESS_SLOWPATH(DECORATORS)                                                              \
  template oop AccessInternal::load_at<DECORATORS, oop>(oop base, ptrdiff_t offset);                      \
  template void AccessInternal::store_at<DECORATORS, oop>(oop base, ptrdiff_t offset, oop value);         \
                                                                                                          \
  template jbyte AccessInternal::load_at<DECORATORS, jbyte>(oop base, ptrdiff_t offset);                  \
  template jshort AccessInternal::load_at<DECORATORS, jshort>(oop base, ptrdiff_t offset);                \
  template jint AccessInternal::load_at<DECORATORS, jint>(oop base, ptrdiff_t offset);                    \
  template jlong AccessInternal::load_at<DECORATORS, jlong>(oop base, ptrdiff_t offset);                  \
  template jfloat AccessInternal::load_at<DECORATORS, jfloat>(oop base, ptrdiff_t offset);                \
  template jdouble AccessInternal::load_at<DECORATORS, jdouble>(oop base, ptrdiff_t offset);              \
                                                                                                          \
  template void AccessInternal::store_at<DECORATORS, jbyte>(oop base, ptrdiff_t offset, jbyte value);     \
  template void AccessInternal::store_at<DECORATORS, jshort>(oop base, ptrdiff_t offset, jshort value);   \
  template void AccessInternal::store_at<DECORATORS, jint>(oop base, ptrdiff_t offset, jint value);       \
  template void AccessInternal::store_at<DECORATORS, jlong>(oop base, ptrdiff_t offset, jlong value);     \
  template void AccessInternal::store_at<DECORATORS, jfloat>(oop base, ptrdiff_t offset, jfloat value);   \
  template void AccessInternal::store_at<DECORATORS, jdouble>(oop base, ptrdiff_t offset, jdouble value); \
                                                                                                          \
  template jubyte AccessInternal::load_at<DECORATORS, jubyte>(oop base, ptrdiff_t offset);                \
  template jushort AccessInternal::load_at<DECORATORS, jushort>(oop base, ptrdiff_t offset);              \
  template juint AccessInternal::load_at<DECORATORS, juint>(oop base, ptrdiff_t offset);                  \
  template julong AccessInternal::load_at<DECORATORS, julong>(oop base, ptrdiff_t offset);                \
                                                                                                          \
  template void AccessInternal::store_at<DECORATORS, jubyte>(oop base, ptrdiff_t offset, jubyte value);   \
  template void AccessInternal::store_at<DECORATORS, jushort>(oop base, ptrdiff_t offset, jushort value); \
  template void AccessInternal::store_at<DECORATORS, juint>(oop base, ptrdiff_t offset, juint value);     \
  template void AccessInternal::store_at<DECORATORS, julong>(oop base, ptrdiff_t offset, julong value);

GENERATE_ACCESS_SLOWPATH(ACCESS_ON_HEAP)
GENERATE_ACCESS_SLOWPATH(ACCESS_BASIC)
GENERATE_ACCESS_SLOWPATH(ACCESS_RAW)

#undef GENERATE_ACCESS_SLOWPATH
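
// To pre-instantiate these accesses for an additional decorator set, add a
// matching invocation to the list above before the #undef, for example
// (ACCESS_ATOMIC is a hypothetical decorator set, for illustration only):
//
//   GENERATE_ACCESS_SLOWPATH(ACCESS_ATOMIC)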