
src/hotspot/share/gc/z/zBarrier.inline.hpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
  25 #define SHARE_GC_Z_ZBARRIER_INLINE_HPP
  26 
  27 #include "classfile/javaClasses.hpp"
  28 #include "gc/z/zAddress.inline.hpp"
  29 #include "gc/z/zBarrier.hpp"
  30 #include "gc/z/zOop.inline.hpp"
  31 #include "gc/z/zResurrection.inline.hpp"
  32 #include "oops/oop.hpp"
  33 #include "runtime/atomic.hpp"
  34 
  35 inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
  36   if (heal_addr == 0) {
  37     // Never heal with null since it interacts badly with reference processing.
  38     // A mutator clearing an oop would be similar to calling Reference.clear(),
  39     // which would make the reference non-discoverable or silently dropped
  40     // by the reference processor.
  41     return;
  42   }
  43 
  44   for (;;) {
  45     if (addr == heal_addr) {
  46       // Already healed
  47       return;
  48     }
  49 
  50     // Heal
  51     const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr);
  52     if (prev_addr == addr) {
  53       // Success
  54       return;
  55     }
  56 
  57     if (ZAddress::is_good_or_null(prev_addr)) {
  58       // No need to heal
  59       return;
  60     }
  61 
  62     // The oop location was healed by another barrier, but it is still not
  63     // good or null. Re-apply healing to make sure the oop is not left with
  64     // weaker (remapped or finalizable) metadata bits than what this barrier
  65     // tried to apply.
  66     assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset");
  67     addr = prev_addr;
  68   }
  69 }
  70 
  71 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
  72 inline oop ZBarrier::barrier(volatile oop* p, oop o) {
  73   uintptr_t addr = ZOop::to_address(o);
  74 
  75   // Fast path
  76   if (fast_path(addr)) {
  77     return ZOop::from_address(addr);
  78   }
  79 
  80   // Slow path
  81   const uintptr_t good_addr = slow_path(addr);
  82 
  83   if (p != NULL) {
  84     self_heal(p, addr, good_addr);
  85   }
  86 
  87   return ZOop::from_address(good_addr);
  88 }
  89 
  90 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
  91 inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
  92   const uintptr_t addr = ZOop::to_address(o);
  93 
  94   // Fast path
  95   if (fast_path(addr)) {
  96     // Return the good address instead of the weak good address
  97     // to ensure that the currently active heap view is used.
  98     return ZOop::from_address(ZAddress::good_or_null(addr));
  99   }
 100 
 101   // Slow path
 102   const uintptr_t good_addr = slow_path(addr);
 103 
 104   if (p != NULL) {
 105     // The slow path returns a good/marked address or null, but we never mark
 106     // oops in a weak load barrier so we always heal with the remapped address.
 107     self_heal(p, addr, ZAddress::remapped_or_null(good_addr));
 108   }
 109 
 110   return ZOop::from_address(good_addr);
 111 }
 112 
 113 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
 114 inline void ZBarrier::root_barrier(oop* p, oop o) {
 115   const uintptr_t addr = ZOop::to_address(o);
 116 
 117   // Fast path
 118   if (fast_path(addr)) {
 119     return;
 120   }
 121 
 122   // Slow path
 123   const uintptr_t good_addr = slow_path(addr);
 124 
 125   // Non-atomic healing helps speed up root scanning. This is safe to do
 126   // since we are always healing roots in a safepoint, or under a lock,
 127   // which ensures we are never racing with mutators modifying roots while
 128   // we are healing them. It's also safe in case multiple GC threads try
 129   // to heal the same root if it is aligned, since they would always heal
 130   // the root in the same way and it does not matter in which order it
 131   // happens. For misaligned oops, there needs to be mutual exclusion.
 132   *p = ZOop::from_address(good_addr);
 133 }
 134 
 135 inline bool ZBarrier::is_null_fast_path(uintptr_t addr) {
 136   return ZAddress::is_null(addr);
 137 }
 138 
 139 inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
 140   return ZAddress::is_good_or_null(addr);
 141 }
 142 
 143 inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
 144   return ZAddress::is_weak_good_or_null(addr);
 145 }
 146 
 147 inline bool ZBarrier::during_mark() {
 148   return ZGlobalPhase == ZPhaseMark;
 149 }
 150 
 151 inline bool ZBarrier::during_relocate() {
 152   return ZGlobalPhase == ZPhaseRelocate;
 153 }
 154 
 155 //
 156 // Load barrier
 157 //
 158 inline oop ZBarrier::load_barrier_on_oop(oop o) {
 159   return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
 160 }
 161 
 162 inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
 163   const oop o = *p;
 164   return load_barrier_on_oop_field_preloaded(p, o);
 165 }
 166 


 283   assert(ZResurrection::is_blocked(), "Invalid phase");
 284   const oop o = *p;
 285   barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
 286 }
 287 
 288 inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
 289   // This operation is only valid when resurrection is blocked.
 290   assert(ZResurrection::is_blocked(), "Invalid phase");
 291   const oop o = *p;
 292   barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
 293 }
 294 
 295 inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
 296   // This operation is only valid when resurrection is blocked.
 297   assert(ZResurrection::is_blocked(), "Invalid phase");
 298   const oop o = *p;
 299   root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
 300 }
 301 
 302 inline void ZBarrier::keep_alive_barrier_on_oop(oop o) {
 303   if (during_mark()) {
 304     barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(NULL, o);
 305   }
 306 }
 307 
 308 //
 309 // Mark barrier
 310 //
 311 inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
  312   // The fast path only checks for null since the GC worker
  313   // threads doing marking want to mark through good oops.
 314   const oop o = *p;
 315 
 316   if (finalizable) {
 317     barrier<is_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
 318   } else {
 319     barrier<is_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
 320   }
 321 }
 322 
 323 inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
 324   for (volatile const oop* const end = p + length; p < end; p++) {
 325     mark_barrier_on_oop_field(p, finalizable);
 326   }
 327 }
 328 
 329 inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
 330   const oop o = *p;
 331   root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
 332 }
 333 
 334 inline void ZBarrier::mark_barrier_on_invisible_root_oop_field(oop* p) {
 335   const oop o = *p;
 336   root_barrier<is_good_or_null_fast_path, mark_barrier_on_invisible_root_oop_slow_path>(p, o);
 337 }
 338 
 339 //

--- old/src/hotspot/share/gc/z/zBarrier.inline.hpp
+++ new/src/hotspot/share/gc/z/zBarrier.inline.hpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  */
  23 
  24 #ifndef SHARE_GC_Z_ZBARRIER_INLINE_HPP
  25 #define SHARE_GC_Z_ZBARRIER_INLINE_HPP
  26 
  27 #include "classfile/javaClasses.hpp"
  28 #include "gc/z/zAddress.inline.hpp"
  29 #include "gc/z/zBarrier.hpp"
  30 #include "gc/z/zOop.inline.hpp"
  31 #include "gc/z/zResurrection.inline.hpp"
  32 #include "oops/oop.hpp"
  33 #include "runtime/atomic.hpp"
  34 
  35 // A self heal must always "upgrade" the address metadata bits in
  36 // accordance with the metadata bits state machine, which has the
  37 // valid state transitions as described below (where N is the GC
  38 // cycle).
  39 //
   40 // Note the subtlety of overlapping GC cycles. Specifically,
  41 // oops are colored Remapped(N) starting at relocation N and ending
  42 // at marking N + 1.
  43 //
  44 //              +--- Mark Start
  45 //              | +--- Mark End
  46 //              | | +--- Relocate Start
  47 //              | | | +--- Relocate End
  48 //              | | | |
  49 // Marked       |---N---|--N+1--|--N+2--|----
  50 // Finalizable  |---N---|--N+1--|--N+2--|----
  51 // Remapped     ----|---N---|--N+1--|--N+2--|
  52 //
  53 // VALID STATE TRANSITIONS
  54 //
  55 //   Marked(N)           -> Remapped(N)
  56 //                       -> Marked(N + 1)
  57 //                       -> Finalizable(N + 1)
  58 //
  59 //   Finalizable(N)      -> Marked(N)
  60 //                       -> Remapped(N)
  61 //                       -> Marked(N + 1)
  62 //                       -> Finalizable(N + 1)
  63 //
  64 //   Remapped(N)         -> Marked(N + 1)
  65 //                       -> Finalizable(N + 1)
  66 //
  67 // PHASE VIEW
  68 //
  69 // ZPhaseMark
  70 //   Load & Mark
  71 //     Marked(N)         <- Marked(N - 1)
  72 //                       <- Finalizable(N - 1)
  73 //                       <- Remapped(N - 1)
  74 //                       <- Finalizable(N)
  75 //
  76 //   Mark(Finalizable)
  77 //     Finalizable(N)    <- Marked(N - 1)
  78 //                       <- Finalizable(N - 1)
  79 //                       <- Remapped(N - 1)
  80 //
  81 //   Load(AS_NO_KEEPALIVE)
  82 //     Remapped(N - 1)   <- Finalizable(N - 1)
  83 //
  84 // ZPhaseMarkCompleted (Resurrection blocked)
  85 //   Load & Load(AS_NO_KEEPALIVE) & KeepAlive
  86 //     Marked(N)         <- Marked(N - 1)
  87 //                       <- Finalizable(N - 1)
   88 //                       <- Remapped(N - 1)
  89 //                       <- Finalizable(N)
  90 //
  91 // ZPhaseMarkCompleted (Resurrection unblocked)
  92 //   Load & Load(AS_NO_KEEPALIVE)
  93 //     Marked(N)         <- Finalizable(N)
  94 //
  95 // ZPhaseRelocate
  96 //   Load & Load(AS_NO_KEEPALIVE)
  97 //     Remapped(N)       <- Marked(N)
  98 //                       <- Finalizable(N)
  99 
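As a standalone sketch (not part of this file), the VALID STATE TRANSITIONS
table above can be encoded as a small checker; the Color and State names
below are hypothetical, for illustration only:

    enum Color { Marked, Finalizable, Remapped };

    struct State {
      Color color;
      int   cycle; // the GC cycle N
    };

    // True if from -> to is one of the valid transitions listed above.
    inline bool is_valid_transition(State from, State to) {
      switch (from.color) {
      case Marked:      // -> Remapped(N), Marked(N + 1), Finalizable(N + 1)
        return (to.color == Remapped && to.cycle == from.cycle) ||
               (to.color != Remapped && to.cycle == from.cycle + 1);
      case Finalizable: // -> Marked(N), Remapped(N), Marked(N + 1), Finalizable(N + 1)
        return (to.color != Finalizable && to.cycle == from.cycle) ||
               (to.color != Remapped && to.cycle == from.cycle + 1);
      case Remapped:    // -> Marked(N + 1), Finalizable(N + 1)
        return to.color != Remapped && to.cycle == from.cycle + 1;
      }
      return false;
    }
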
 100 template <ZBarrierFastPath fast_path>
 101 inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
 102   if (heal_addr == 0) {
 103     // Never heal with null since it interacts badly with reference processing.
 104     // A mutator clearing an oop would be similar to calling Reference.clear(),
 105     // which would make the reference non-discoverable or silently dropped
 106     // by the reference processor.
 107     return;
 108   }
 109 
 110   assert(!fast_path(addr), "Invalid self heal");
 111   assert(fast_path(heal_addr), "Invalid self heal");
 112 
 113   for (;;) {
 114     // Heal
 115     const uintptr_t prev_addr = Atomic::cmpxchg((volatile uintptr_t*)p, addr, heal_addr);
 116     if (prev_addr == addr) {
 117       // Success
 118       return;
 119     }
 120 
 121     if (fast_path(prev_addr)) {
 122       // Must not self heal
 123       return;
 124     }
 125 
 126     // The oop location was healed by another barrier, but still needs upgrading.
 127     // Re-apply healing to make sure the oop is not left with weaker (remapped or
 128     // finalizable) metadata bits than what this barrier tried to apply.
 129     assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset");
 130     addr = prev_addr;
 131   }
 132 }
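
The loop above is a monotonic-upgrade CAS pattern: keep retrying until the
location either holds our value or a value the fast_path predicate accepts.
A minimal standalone analogue using std::atomic (the real code uses
HotSpot's Atomic::cmpxchg; the names here are illustrative only):

    #include <atomic>
    #include <cstdint>

    inline void heal(std::atomic<uintptr_t>* slot, uintptr_t expected,
                     uintptr_t heal_val, bool (*strong_enough)(uintptr_t)) {
      while (!slot->compare_exchange_strong(expected, heal_val)) {
        // CAS failed; 'expected' now holds the value another thread installed.
        if (strong_enough(expected)) {
          return; // already healed with bits at least as strong, nothing to do
        }
        // Healed by a concurrent barrier with weaker bits; retry to upgrade.
      }
    }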
 133 
 134 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
 135 inline oop ZBarrier::barrier(volatile oop* p, oop o) {
 136   const uintptr_t addr = ZOop::to_address(o);
 137 
 138   // Fast path
 139   if (fast_path(addr)) {
 140     return ZOop::from_address(addr);
 141   }
 142 
 143   // Slow path
 144   const uintptr_t good_addr = slow_path(addr);
 145 
 146   if (p != NULL) {
 147     self_heal<fast_path>(p, addr, good_addr);
 148   }
 149 
 150   return ZOop::from_address(good_addr);
 151 }
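
Concrete barriers are instantiations of this template with a matching
fast-path predicate and slow-path function, as the keep-alive barriers
later in this file show, for example:

    barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);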
 152 
 153 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
 154 inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
 155   const uintptr_t addr = ZOop::to_address(o);
 156 
 157   // Fast path
 158   if (fast_path(addr)) {
 159     // Return the good address instead of the weak good address
 160     // to ensure that the currently active heap view is used.
 161     return ZOop::from_address(ZAddress::good_or_null(addr));
 162   }
 163 
 164   // Slow path
 165   const uintptr_t good_addr = slow_path(addr);
 166 
 167   if (p != NULL) {
 168     // The slow path returns a good/marked address or null, but we never mark
 169     // oops in a weak load barrier so we always heal with the remapped address.
 170     self_heal<fast_path>(p, addr, ZAddress::remapped_or_null(good_addr));
 171   }
 172 
 173   return ZOop::from_address(good_addr);
 174 }
 175 
 176 template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
 177 inline void ZBarrier::root_barrier(oop* p, oop o) {
 178   const uintptr_t addr = ZOop::to_address(o);
 179 
 180   // Fast path
 181   if (fast_path(addr)) {
 182     return;
 183   }
 184 
 185   // Slow path
 186   const uintptr_t good_addr = slow_path(addr);
 187 
 188   // Non-atomic healing helps speed up root scanning. This is safe to do
 189   // since we are always healing roots in a safepoint, or under a lock,
 190   // which ensures we are never racing with mutators modifying roots while
 191   // we are healing them. It's also safe in case multiple GC threads try
 192   // to heal the same root if it is aligned, since they would always heal
 193   // the root in the same way and it does not matter in which order it
 194   // happens. For misaligned oops, there needs to be mutual exclusion.
 195   *p = ZOop::from_address(good_addr);
 196 }
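
Because every GC thread computes the same good address for a given aligned
root, the plain store above is idempotent; a minimal standalone sketch of
the safe non-atomic form (hypothetical name):

    // Safe only in a safepoint or under a lock, as described above.
    inline void heal_root(uintptr_t* p, uintptr_t good_addr) {
      *p = good_addr; // plain store, no CAS needed
    }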
 197 
 198 inline bool ZBarrier::is_good_or_null_fast_path(uintptr_t addr) {
 199   return ZAddress::is_good_or_null(addr);
 200 }
 201 
 202 inline bool ZBarrier::is_weak_good_or_null_fast_path(uintptr_t addr) {
 203   return ZAddress::is_weak_good_or_null(addr);
 204 }
 205 
 206 inline bool ZBarrier::is_marked_or_null_fast_path(uintptr_t addr) {
 207   return ZAddress::is_marked_or_null(addr);
 208 }
 209 
 210 inline bool ZBarrier::during_mark() {
 211   return ZGlobalPhase == ZPhaseMark;
 212 }
 213 
 214 inline bool ZBarrier::during_relocate() {
 215   return ZGlobalPhase == ZPhaseRelocate;
 216 }
 217 
 218 //
 219 // Load barrier
 220 //
 221 inline oop ZBarrier::load_barrier_on_oop(oop o) {
 222   return load_barrier_on_oop_field_preloaded((oop*)NULL, o);
 223 }
 224 
 225 inline oop ZBarrier::load_barrier_on_oop_field(volatile oop* p) {
 226   const oop o = *p;
 227   return load_barrier_on_oop_field_preloaded(p, o);
 228 }
 229 


 346   assert(ZResurrection::is_blocked(), "Invalid phase");
 347   const oop o = *p;
 348   barrier<is_good_or_null_fast_path, keep_alive_barrier_on_weak_oop_slow_path>(p, o);
 349 }
 350 
 351 inline void ZBarrier::keep_alive_barrier_on_phantom_oop_field(volatile oop* p) {
 352   // This operation is only valid when resurrection is blocked.
 353   assert(ZResurrection::is_blocked(), "Invalid phase");
 354   const oop o = *p;
 355   barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
 356 }
 357 
 358 inline void ZBarrier::keep_alive_barrier_on_phantom_root_oop_field(oop* p) {
 359   // This operation is only valid when resurrection is blocked.
 360   assert(ZResurrection::is_blocked(), "Invalid phase");
 361   const oop o = *p;
 362   root_barrier<is_good_or_null_fast_path, keep_alive_barrier_on_phantom_oop_slow_path>(p, o);
 363 }
 364 
 365 inline void ZBarrier::keep_alive_barrier_on_oop(oop o) {
 366   const uintptr_t addr = ZOop::to_address(o);
 367   assert(ZAddress::is_good(addr), "Invalid address");
 368 
 369   if (during_mark()) {
 370     mark_barrier_on_oop_slow_path(addr);
 371   }
 372 }
 373 
 374 //
 375 // Mark barrier
 376 //
 377 inline void ZBarrier::mark_barrier_on_oop_field(volatile oop* p, bool finalizable) {
 378   const oop o = *p;
 379 
 380   if (finalizable) {
 381     barrier<is_marked_or_null_fast_path, mark_barrier_on_finalizable_oop_slow_path>(p, o);
 382   } else {
 383     const uintptr_t addr = ZOop::to_address(o);
 384     if (ZAddress::is_good(addr)) {
 385       // Mark through good oop
 386       mark_barrier_on_oop_slow_path(addr);
 387     } else {
 388       // Mark through bad oop
 389       barrier<is_good_or_null_fast_path, mark_barrier_on_oop_slow_path>(p, o);
 390     }
 391   }
 392 }
 393 
 394 inline void ZBarrier::mark_barrier_on_oop_array(volatile oop* p, size_t length, bool finalizable) {
 395   for (volatile const oop* const end = p + length; p < end; p++) {
 396     mark_barrier_on_oop_field(p, finalizable);
 397   }
 398 }
 399 
 400 inline void ZBarrier::mark_barrier_on_root_oop_field(oop* p) {
 401   const oop o = *p;
 402   root_barrier<is_good_or_null_fast_path, mark_barrier_on_root_oop_slow_path>(p, o);
 403 }
 404 
 405 inline void ZBarrier::mark_barrier_on_invisible_root_oop_field(oop* p) {
 406   const oop o = *p;
 407   root_barrier<is_good_or_null_fast_path, mark_barrier_on_invisible_root_oop_slow_path>(p, o);
 408 }
 409 
 410 //