/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
  26 #include "access.inline.hpp"
  27 #include "gc/shared/collectedHeap.hpp"
  28 #include "oops/oop.inline.hpp"
  29 #include "runtime/mutexLocker.hpp"
  30 #include "runtime/vm_version.hpp"
  31 #include "utilities/copy.hpp"
  32 
// VM_Version::supports_cx8() is a surrogate for 'supports atomic long memory ops'.
//
// On platforms which do not support atomic compare-and-swap of jlong (8 byte)
// values we have to use a lock-based scheme to enforce atomicity. This has to be
// applied to all Unsafe operations that set the value of a jlong field. Even so
// the compareAndSwapLong operation will not be atomic with respect to direct stores
// to the field from Java code. It is important therefore that any Java code that
// utilizes these Unsafe jlong operations does not perform direct stores. To permit
// direct loads of the field from Java code we must also use Atomic::store within the
// locked regions. And for good measure, in case there are direct stores, we also
// employ Atomic::load within those regions. Note that the field in question must be
// volatile and so must have atomic load/store accesses applied at the Java level.
//
// The locking scheme could utilize a range of strategies for controlling the locking
// granularity: from a lock per-field through to a single global lock. The latter is
// the simplest and is used for the current implementation. Note that the Java object
// that contains the field cannot, in general, be used for locking. To do so can lead
// to deadlocks as we may introduce locking into what appears to the Java code to be a
// lock-free path.
//
// As all the locked regions are very short and themselves non-blocking, we can treat
// them as leaf routines and elide safepoint checks (i.e. we don't perform any thread
// state transitions even when blocking for the lock). Note that if we do choose to
// add safepoint checks and thread state transitions, we must ensure that we calculate
// the address of the field _after_ we have acquired the lock, else the object may have
// been moved by the GC.
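//
// As a hedged sketch of the intended dispatch (the helper below is illustrative
// and not defined in this file): a caller on a platform without statically known
// 64 bit atomics is expected to test wide_atomic_needs_locking() and route wide
// accesses through the locked helpers, e.g.
//
//   jlong load_jlong(void* addr) {
//     if (AccessInternal::wide_atomic_needs_locking()) {
//       return AccessInternal::load_locked(addr);       // lock-based fallback
//     }
//     return RawAccess<MO_RELAXED>::load((jlong*)addr); // native 64 bit atomic load
//   }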

#ifndef SUPPORTS_NATIVE_CX8

// This is intentionally in the cpp file rather than the .inline.hpp file. It seems
// desirable to trade faster JDK build times (not propagating vm_version.hpp) for
// slightly worse runtime performance of atomic jlong accesses on 32 bit machines
// whose hardware does support 64 bit atomics.
bool AccessInternal::wide_atomic_needs_locking() {
  return !VM_Version::supports_cx8();
}

jlong AccessInternal::load_locked(void* addr) {
  assert(!VM_Version::supports_cx8(), "why else?");
  MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
  return RawAccess<MO_RELAXED>::load((jlong*)addr);
}

void AccessInternal::store_locked(void* addr, jlong val) {
  assert(!VM_Version::supports_cx8(), "why else?");
  MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
  RawAccess<MO_RELAXED>::store((jlong*)addr, val);
}

jlong AccessInternal::swap_locked(jlong new_val, void* addr) {
  assert(!VM_Version::supports_cx8(), "why else?");
  MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
  jlong old_val = RawAccess<MO_RELAXED>::load((jlong*)addr);
  RawAccess<MO_RELAXED>::store((jlong*)addr, new_val);
  return old_val;
}

jlong AccessInternal::cas_locked(jlong new_val, void* addr, jlong expected_val) {
  assert(!VM_Version::supports_cx8(), "why else?");
  MutexLockerEx mu(UnsafeJlong_lock, Mutex::_no_safepoint_check_flag);
  jlong old_val = RawAccess<MO_RELAXED>::load((jlong*)addr);
  if (old_val == expected_val) {
    RawAccess<MO_RELAXED>::store((jlong*)addr, new_val);
  }
  return old_val;
}
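
// Note (illustrative, not part of this file's API): like a hardware CAS,
// cas_locked returns the witnessed value, so a hypothetical caller detects
// success by comparing the witness against the expected value:
//
//   bool ok = AccessInternal::cas_locked(new_val, addr, expected_val) == expected_val;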

#endif

void AccessInternal::copy_arrayof_conjoint_oops(void* src, void* dst, size_t length) {
  Copy::arrayof_conjoint_oops((HeapWord*)src, (HeapWord*)dst, length);
}

void AccessInternal::copy_conjoint_oops(void* src, void* dst, size_t length) {
  Copy::conjoint_oops_atomic((oop*)src, (oop*)dst, length);
}

void AccessInternal::copy_conjoint_memory_atomic(void* src, void* dst, size_t length) {
  Copy::conjoint_memory_atomic(src, dst, length);
}

void AccessInternal::copy_conjoint_jbytes(void* src, void* dst, size_t length) {
  Copy::conjoint_jbytes((jbyte*)src, (jbyte*)dst, length);
}

void AccessInternal::copy_conjoint_jlongs_atomic(void* src, void* dst, size_t length) {
  Copy::conjoint_jlongs_atomic((jlong*)src, (jlong*)dst, length);
}

void AccessInternal::copy_disjoint_words(void* src, void* dst, size_t length) {
  Copy::disjoint_words((HeapWord*)src, (HeapWord*)dst, length);
}

// Add common accesses here that can be made without access.inline.hpp, for
// faster build times, when inlining of the access is not crucial because the
// access is not hot enough for the extra call to be noticeable. In such cases
// it is desirable to save build times instead.
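//
// For example (illustrative; real call sites go through the Access wrapper
// API rather than AccessInternal directly), a .cpp file that includes only
// access.hpp can still link against the jint instantiation emitted below:
//
//   jint v = AccessInternal::load_at<ACCESS_ON_HEAP, oop, jint>(obj, offset);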

#define GENERATE_ACCESS_SLOWPATH(DECORATORS)                                                                            \
template oop     AccessInternal::load_at<DECORATORS | VALUE_IS_OOP, oop, oop>(oop base, ptrdiff_t offset);              \
template void    AccessInternal::store_at<DECORATORS | VALUE_IS_OOP, oop, oop>(oop base, ptrdiff_t offset, oop value);  \
                                                                                                                        \
template jbyte   AccessInternal::load_at<DECORATORS, oop, jbyte>(oop base, ptrdiff_t offset);                           \
template jshort  AccessInternal::load_at<DECORATORS, oop, jshort>(oop base, ptrdiff_t offset);                          \
template jint    AccessInternal::load_at<DECORATORS, oop, jint>(oop base, ptrdiff_t offset);                            \
template jlong   AccessInternal::load_at<DECORATORS, oop, jlong>(oop base, ptrdiff_t offset);                           \
template jfloat  AccessInternal::load_at<DECORATORS, oop, jfloat>(oop base, ptrdiff_t offset);                          \
template jdouble AccessInternal::load_at<DECORATORS, oop, jdouble>(oop base, ptrdiff_t offset);                         \
                                                                                                                        \
template void    AccessInternal::store_at<DECORATORS, oop, jbyte>(oop base, ptrdiff_t offset, jbyte value);             \
template void    AccessInternal::store_at<DECORATORS, oop, jshort>(oop base, ptrdiff_t offset, jshort value);           \
template void    AccessInternal::store_at<DECORATORS, oop, jint>(oop base, ptrdiff_t offset, jint value);               \
template void    AccessInternal::store_at<DECORATORS, oop, jlong>(oop base, ptrdiff_t offset, jlong value);             \
template void    AccessInternal::store_at<DECORATORS, oop, jfloat>(oop base, ptrdiff_t offset, jfloat value);           \
template void    AccessInternal::store_at<DECORATORS, oop, jdouble>(oop base, ptrdiff_t offset, jdouble value);         \
                                                                                                                        \
template jubyte  AccessInternal::load_at<DECORATORS, oop, jubyte>(oop base, ptrdiff_t offset);                          \
template jushort AccessInternal::load_at<DECORATORS, oop, jushort>(oop base, ptrdiff_t offset);                         \
template juint   AccessInternal::load_at<DECORATORS, oop, juint>(oop base, ptrdiff_t offset);                           \
template julong  AccessInternal::load_at<DECORATORS, oop, julong>(oop base, ptrdiff_t offset);                          \
                                                                                                                        \
template void AccessInternal::store_at<DECORATORS, oop, jubyte>(oop base, ptrdiff_t offset, jubyte value);              \
template void AccessInternal::store_at<DECORATORS, oop, jushort>(oop base, ptrdiff_t offset, jushort value);            \
template void AccessInternal::store_at<DECORATORS, oop, juint>(oop base, ptrdiff_t offset, juint value);                \
template void AccessInternal::store_at<DECORATORS, oop, julong>(oop base, ptrdiff_t offset, julong value);

GENERATE_ACCESS_SLOWPATH(ACCESS_ON_HEAP)
GENERATE_ACCESS_SLOWPATH(ACCESS_BASIC)
GENERATE_ACCESS_SLOWPATH(ACCESS_RAW)

#undef GENERATE_ACCESS_SLOWPATH