1 /*
   2  * Copyright (c) 2015, 2017, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
  25 #define SHARE_GC_Z_ZBARRIER_INLINE_HPP
  26 
  27 #include "classfile/javaClasses.hpp"
  28 #include "gc/z/zAddress.inline.hpp"
  29 #include "gc/z/zBarrier.hpp"
  30 #include "gc/z/zOop.inline.hpp"
  31 #include "gc/z/zResurrection.inline.hpp"
  32 #include "oops/oop.hpp"
  33 #include "runtime/atomic.hpp"
  34 
// Generic load barrier. If fast_path accepts the oop's address the oop is
// returned unchanged. Otherwise slow_path produces a good address, which is
// self healed into *p using a CAS. A NULL p means there is no field to heal
// (barrier applied to a naked oop). On a lost CAS race the barrier is
// re-applied to the winning value, since that value might not be good.
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  uintptr_t addr = ZOop::to_address(o);

retry:
  // Fast path
  if (fast_path(addr)) {
    return ZOop::from_address(addr);
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Self heal, but only if the address was actually updated by the slow path,
  // which might not be the case, e.g. when marking through an already good oop.
  if (p != NULL && good_addr != addr) {
    const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. If this oop was updated by a
      // weak barrier the new oop might not be good, in which case we need
      // to re-apply this barrier.
      addr = prev_addr;
      goto retry;
    }
  }

  return ZOop::from_address(good_addr);
}
  63 
// Weak load barrier. Like barrier(), but never self heals with a good/marked
// address: the field is healed with the remapped (weak good) address instead,
// while the good address is what is returned to the caller. A zero address
// from the slow path means resurrection was blocked; in that case no healing
// is done and null is returned.
template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::from_address(ZAddress::good_or_null(addr));
  }

  // Slow path
  uintptr_t good_addr = slow_path(addr);

  // Self heal unless the address returned from the slow path is null,
  // in which case resurrection was blocked and we must let the reference
  // processor clear the oop. Mutators are not allowed to clear oops in
  // these cases, since that would be similar to calling Reference.clear(),
  // which would make the reference non-discoverable or silently dropped
  // by the reference processor.
  if (p != NULL && good_addr != 0) {
    // The slow path returns a good/marked address, but we never mark oops
    // in a weak load barrier so we always self heal with the remapped address.
    const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
    const uintptr_t prev_addr = Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
    if (prev_addr != addr) {
      // Some other thread overwrote the oop. The new
      // oop is guaranteed to be weak good or null.
      assert(ZAddress::is_weak_good_or_null(prev_addr), "Bad weak overwrite");

      // Return the good address instead of the weak good address
      // to ensure that the currently active heap view is used.
      good_addr = ZAddress::good_or_null(prev_addr);
    }
  }

  return ZOop::from_address(good_addr);
}
 102 
 103 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
 104 inline void ZBarrier::root_barrier(oop* p, oop o) {
 105   const uintptr_t addr = ZOop::to_address(o);
 106 
 107   // Fast path
 108   if (fast_path(addr)) {
 109     return;
 110   }
 111 
 112   // Slow path
 113   const uintptr_t good_addr = slow_path(addr);
 114 
 115   // Non-atomic healing helps speed up root scanning. This is safe to do
 116   // since we are always healing roots in a safepoint, or under a lock,
 117   // which ensures we are never racing with mutators modifying roots while
 118   // we are healing them. It's also safe in case multiple GC threads try
 119   // to heal the same root if it is aligned, since they would always heal
 120   // the root in the same way and it does not matter in which order it
 121   // happens. For misaligned oops, there needs to be mutual exclusion.
 122   *p = ZOop::from_address(good_addr);
 123 }
 124 
 125 inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
 126   return ZAddress::is_null(addr);
 127 }
 128 
 129 inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
 130   return ZAddress::is_good_or_null(addr);
 131 }
 132 
 133 inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
 134   return ZAddress::is_weak_good_or_null(addr);
 135 }
 136 
// Returns whether resurrection is currently blocked. As a side effect, when
// p is non-NULL, *o is reloaded from *p AFTER sampling the blocked state --
// the ordering of the check and the reload is what makes this race-free.
inline bool ZBarrier::is_resurrection_blocked(volatile oop* p, oop* o) {
  const bool is_blocked = ZResurrection::is_blocked();

  // Reload oop after checking the resurrection blocked state. This is
  // done to prevent a race where we first load an oop, which is logically
  // null but not yet cleared, then this oop is cleared by the reference
  // processor and resurrection is unblocked. At this point the mutator
  // would see the unblocked state and pass this invalid oop through the
  // normal barrier path, which would incorrectly try to mark this oop.
  if (p != NULL) {
    // First assign to reloaded_o to avoid compiler warning about
    // implicit dereference of volatile oop.
    const oop reloaded_o = *p;
    *o = reloaded_o;
  }

  return is_blocked;
}
 155 
 156 //
 157 // Load barrier
 158 //
 159 inline oop ZBarrier::load_barrier_on_oop(oop o) {
 160   return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
 161 }
 162 
 163 inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
 164   const oop o = *p;
 165   return load_barrier_on_oop_field_preloaded(p, o);
 166 }
 167 
 168 inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
 169   return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
 170 }
 171 
 172 inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
 173   for (volatile const oop* const end = p + length; p < end; p++) {
 174     load_barrier_on_oop_field(p);
 175   }
 176 }
 177 
 178 // ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
 179 inline void verify_on_weak(volatile oop* referent_addr) {
 180 #ifdef ASSERT
 181   if (referent_addr != NULL) {
 182     uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset;
 183     oop obj = cast_to_oop(base);
 184     assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
 185     assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset), "Sanity");
 186   }
 187 #endif
 188 }
 189 
 190 inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
 191   verify_on_weak(p);
 192 
 193   if (is_resurrection_blocked(p, &o)) {
 194     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
 195   }
 196 
 197   return load_barrier_on_oop_field_preloaded(p, o);
 198 }
 199 
 200 inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
 201   if (is_resurrection_blocked(p, &o)) {
 202     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
 203   }
 204 
 205   return load_barrier_on_oop_field_preloaded(p, o);
 206 }
 207 
 208 inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
 209   const oop o = *p;
 210   root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
 211 }
 212 
 213 //
 214 // Weak load barrier
 215 //
 216 inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
 217   assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
 218   const oop o = *p;
 219   return weak_load_barrier_on_oop_field_preloaded(p, o);
 220 }
 221 
 222 inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
 223   return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
 224 }
 225 
 226 inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
 227   return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
 228 }
 229 
 230 inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
 231   const oop o = *p;
 232   return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
 233 }
 234 
 235 inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
 236   verify_on_weak(p);
 237 
 238   if (is_resurrection_blocked(p, &o)) {
 239     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
 240   }
 241 
 242   return weak_load_barrier_on_oop_field_preloaded(p, o);
 243 }
 244 
 245 inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
 246   return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
 247 }
 248 
 249 inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
 250   const oop o = *p;
 251   return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
 252 }
 253 
 254 inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
 255   if (is_resurrection_blocked(p, &o)) {
 256     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
 257   }
 258 
 259   return weak_load_barrier_on_oop_field_preloaded(p, o);
 260 }
 261 
 262 //
 263 // Is alive barrier
 264 //
 265 inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
 266   // Check if oop is logically non-null. This operation
 267   // is only valid when resurrection is blocked.
 268   assert(ZResurrection::is_blocked(), "Invalid phase");
 269   return weak_load_barrier_on_weak_oop(o) != NULL;
 270 }
 271 
 272 inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
 273   // Check if oop is logically non-null. This operation
 274   // is only valid when resurrection is blocked.
 275   assert(ZResurrection::is_blocked(), "Invalid phase");
 276   return weak_load_barrier_on_phantom_oop(o) != NULL;
 277 }
 278 
 279 //
 280 // Keep alive barrier
 281 //
 282 inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
 283   // This operation is only valid when resurrection is blocked.
 284   assert(ZResurrection::is_blocked(), "Invalid phase");
 285   const oop o = *p;
 286   barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
 287 }
 288 
 289 inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
 290   // This operation is only valid when resurrection is blocked.
 291   assert(ZResurrection::is_blocked(), "Invalid phase");
 292   const oop o = *p;
 293   barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
 294 }
 295 
 296 inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
 297   // This operation is only valid when resurrection is blocked.
 298   assert(ZResurrection::is_blocked(), "Invalid phase");
 299   const oop o = *p;
 300   root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
 301 }
 302 
 303 //
 304 // Mark barrier
 305 //
 306 inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
 307   // The fast path only checks for null since the GC worker
 308   // threads doing marking wants to mark through good oops.
 309   const oop o = *p;
 310 
 311   if (finalizable) {
 312     barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
 313   } else {
 314     barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
 315   }
 316 }
 317 
 318 inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
 319   for (volatile const oop* const end = p + length; p < end; p++) {
 320     mark_barrier_on_oop_field(p, finalizable);
 321   }
 322 }
 323 
 324 inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
 325   const oop o = *p;
 326   root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
 327 }
 328 
 329 inline void ZBarrier::mark_barrier_on_invisible_root_oop_field(oop* p) {
 330   const oop o = *p;
 331   root_barrier<is_good_or_null_fast_path, mark_barrier_on_invisible_root_oop_slow_path>(p, o);
 332 }
 333 
 334 //
 335 // Relocate barrier
 336 //
 337 inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
 338   const oop o = *p;
 339   root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
 340 }
 341 
 342 #endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP