/*
 * Copyright (c) 2015, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
#define SHARE_GC_Z_ZBARRIER_INLINE_HPP

#include "classfile/javaClasses.hpp"
#include "gc/z/zAddress.inline.hpp"
#include "gc/z/zBarrier.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zResurrection.inline.hpp"
#include "oops/oop.hpp"
#include "runtime/atomic.hpp"
// A self heal must always "upgrade" the address metadata bits in
// accordance with the metadata bits state machine, which has the
// valid state transitions as described below (where N is the GC
// cycle).
//
// Note the subtlety of overlapping GC cycles. Specifically, oops
// are colored Remapped(N) starting at relocation N and ending at
// marking N + 1.
//
//              +--- Mark Start
//              | +--- Mark End
//              | | +--- Relocate Start
//              | | | +--- Relocate End
//              | | | |
// Marked       |---N---|--N+1--|--N+2--|----
// Finalizable  |---N---|--N+1--|--N+2--|----
// Remapped     ----|---N---|--N+1--|--N+2--|
//
// VALID STATE TRANSITIONS
//
//   Marked(N)           -> Remapped(N)
//                       -> Marked(N + 1)
//                       -> Finalizable(N + 1)
//
//   Finalizable(N)      -> Marked(N)
//                       -> Remapped(N)
//                       -> Marked(N + 1)
//                       -> Finalizable(N + 1)
//
//   Remapped(N)         -> Marked(N + 1)
//                       -> Finalizable(N + 1)
//
// PHASE VIEW
//
// ZPhaseMark
//   Load & Mark
//     Marked(N)         <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//                       <- Remapped(N - 1)
//                       <- Finalizable(N)
//
//   Mark(Finalizable)
//     Finalizable(N)    <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//                       <- Remapped(N - 1)
//
//   Load(AS_NO_KEEPALIVE)
//     Remapped(N - 1)   <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//
// ZPhaseMarkCompleted (Resurrection blocked)
//   Load & Load(ON_WEAK/PHANTOM_OOP_REF | AS_NO_KEEPALIVE) & KeepAlive
//     Marked(N)         <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//                       <- Remapped(N - 1)
//                       <- Finalizable(N)
//
//   Load(ON_STRONG_OOP_REF | AS_NO_KEEPALIVE)
//     Remapped(N - 1)   <- Marked(N - 1)
//                       <- Finalizable(N - 1)
//
// ZPhaseMarkCompleted (Resurrection unblocked)
//   Load
//     Marked(N)         <- Finalizable(N)
//
// ZPhaseRelocate
//   Load & Load(AS_NO_KEEPALIVE)
//     Remapped(N)       <- Marked(N)
//                       <- Finalizable(N)
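//
// WORKED EXAMPLE (illustrative, derived from the tables above)
//
// A field holding an oop colored Marked(N) is loaded during relocation N:
// the is_good_or_null fast path fails, the slow path produces the
// Remapped(N) address, and self healing upgrades the field. If the same
// field is next loaded during marking N + 1, the oop is upgraded again,
// this time to Marked(N + 1). At no point may a barrier move the field
// backwards, e.g. from Marked(N + 1) to Remapped(N), since that is not
// a valid state transition.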

template <ZBarrierFastPath fast_path>
inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
  if (heal_addr == 0) {
    // Never heal with null since it interacts badly with reference processing.
    // A mutator clearing an oop would be similar to calling Reference.clear(),
    // which would make the reference non-discoverable or silently dropped
    // by the reference processor.
    return;
  }

  assert(!fast_path(addr), "Invalid self heal");
  assert(fast_path(heal_addr), "Invalid self heal");

  for (;;) {
    // Heal
    const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr);
    if (prev_addr == addr) {
      // Success
      return;
    }

    if (fast_path(prev_addr)) {
      // Must not self heal
      return;
    }

    // The oop location was healed by another barrier, but still needs upgrading.
    // Re-apply healing to make sure the oop is not left with weaker (remapped or
    // finalizable) metadata bits than what this barrier tried to apply.
    assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset");
    addr = prev_addr;
  }
}
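
// Example of the retry above (illustrative): during ZPhaseMark, a strong
// load barrier healing a field to Marked(N) can lose the cmpxchg to a
// concurrent Load(AS_NO_KEEPALIVE) barrier that installed Remapped(N - 1).
// The remapped address still fails the strong fast path, so the loop
// retries and upgrades the field to Marked(N), never leaving it with the
// weaker metadata bits.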

template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return ZOop::from_address(addr);
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  if (p != NULL) {
    self_heal<fast_path>(p, addr, good_addr);
  }

  return ZOop::from_address(good_addr);
}

template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    // Return the good address instead of the weak good address
    // to ensure that the currently active heap view is used.
    return ZOop::from_address(ZAddress::good_or_null(addr));
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  if (p != NULL) {
    // The slow path returns a good/marked address or null, but we never mark
    // oops in a weak load barrier so we always heal with the remapped address.
    self_heal<fast_path>(p, addr, ZAddress::remapped_or_null(good_addr));
  }

  return ZOop::from_address(good_addr);
}

template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
inline void ZBarrier::root_barrier(oop* p, oop o) {
  const uintptr_t addr = ZOop::to_address(o);

  // Fast path
  if (fast_path(addr)) {
    return;
  }

  // Slow path
  const uintptr_t good_addr = slow_path(addr);

  // Non-atomic healing helps speed up root scanning. This is safe to do
  // since we are always healing roots in a safepoint, or under a lock,
  // which ensures we are never racing with mutators modifying roots while
  // we are healing them. It's also safe in case multiple GC threads try
  // to heal the same root if it is aligned, since they would always heal
  // the root in the same way and it does not matter in which order it
  // happens. For misaligned oops, there needs to be mutual exclusion.
  *p = ZOop::from_address(good_addr);
}
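
// Illustrative scenario for the non-atomic healing above: two GC threads
// both visit the same aligned root during relocation, both compute the
// identical Remapped(N) address in the slow path, and both plain-store it.
// The stores are idempotent, so the outcome is the same regardless of
// ordering.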

inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_good_or_null(addr);
}

inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_weak_good_or_null(addr);
}

inline bool ZBarrier::is_marked_or_null_fast_path(uintptr_t addr) {
  return ZAddress::is_marked_or_null(addr);
}

inline bool ZBarrier::during_mark() {
  return ZGlobalPhase == ZPhaseMark;
}

inline bool ZBarrier::during_relocate() {
  return ZGlobalPhase == ZPhaseRelocate;
}

//
// Load barrier
//
inline oop ZBarrier::load_barrier_on_oop(oop o) {
  return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
  const oop o = *p;
  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

inline void ZBarrier::load_barrier_on_oop_array(volatile oop* p, size_t length) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    load_barrier_on_oop_field(p);
  }
}

// ON_WEAK barriers should only ever be applied to j.l.r.Reference.referents.
inline void verify_on_weak(volatile oop* referent_addr) {
#ifdef ASSERT
  if (referent_addr != NULL) {
    uintptr_t base = (uintptr_t)referent_addr - java_lang_ref_Reference::referent_offset;
    oop obj = cast_to_oop(base);
    assert(oopDesc::is_oop(obj), "Verification failed for: ref " PTR_FORMAT " obj: " PTR_FORMAT, (uintptr_t)referent_addr, base);
    assert(java_lang_ref_Reference::is_referent_field(obj, java_lang_ref_Reference::referent_offset), "Sanity");
  }
#endif
}

inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return load_barrier_on_oop_field_preloaded(p, o);
}

inline void ZBarrier::load_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, load_barrier_on_oop_slow_path>(p, o);
}

//
// Weak load barrier
//
inline oop ZBarrier::weak_load_barrier_on_oop_field(volatile oop* p) {
  assert(!ZResurrection::is_blocked(), "Should not be called during resurrection blocked phase");
  const oop o = *p;
  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_oop_field_preloaded(volatile oop* p, oop o) {
  return weak_barrier<is_weak_good_or_null_fast_path, weak_load_barrier_on_oop_slow_path>(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop(oop o) {
  return weak_load_barrier_on_weak_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_weak_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
  verify_on_weak(p);

  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop(oop o) {
  return weak_load_barrier_on_phantom_oop_field_preloaded((oop*)NULL, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field(volatile oop* p) {
  const oop o = *p;
  return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
}

inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
  if (ZResurrection::is_blocked()) {
    return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
  }

  return weak_load_barrier_on_oop_field_preloaded(p, o);
}

//
// Is alive barrier
//
inline bool ZBarrier::is_alive_barrier_on_weak_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_weak_oop(o) != NULL;
}

inline bool ZBarrier::is_alive_barrier_on_phantom_oop(oop o) {
  // Check if oop is logically non-null. This operation
  // is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  return weak_load_barrier_on_phantom_oop(o) != NULL;
}

//
// Keep alive barrier
//
inline void ZBarrier::keep_alive_barrier_on_weak_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
  // This operation is only valid when resurrection is blocked.
  assert(ZResurrection::is_blocked(), "Invalid phase");
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
}

inline void ZBarrier::keep_alive_barrier_on_oop(oop o) {
  const uintptr_t addr = ZOop::to_address(o);
  assert(ZAddress::is_good(addr), "Invalid address");

  if (during_mark()) {
    mark_barrier_on_oop_slow_path(addr);
  }
}

//
// Mark barrier
//
inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
  const oop o = *p;

  if (finalizable) {
    barrier<is_marked_or_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
  } else {
    const uintptr_t addr = ZOop::to_address(o);
    if (ZAddress::is_good(addr)) {
      // Mark through good oop. The pointer needs no healing, but the
      // object it refers to must still be marked.
      mark_barrier_on_oop_slow_path(addr);
    } else {
      // Mark through bad oop, healing the field as a side effect
      barrier<is_good_or_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
    }
  }
}

inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
  for (volatile const oop* const end = p + length; p < end; p++) {
    mark_barrier_on_oop_field(p, finalizable);
  }
}

inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
}

inline void ZBarrier::mark_barrier_on_invisible_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, mark_barrier_on_invisible_root_oop_slow_path>(p, o);
}

//
// Relocate barrier
//
inline void ZBarrier::relocate_barrier_on_root_oop_field(oop* p) {
  const oop o = *p;
  root_barrier<is_good_or_null_fast_path, relocate_barrier_on_root_oop_slow_path>(p, o);
}

#endif // SHARE_GC_Z_ZBARRIER_INLINE_HPP