
src/hotspot/share/gc/z/zBarrier.inline.hpp


*** 30,99 ****
  #include "gc/z/zOop.inline.hpp"
  #include "gc/z/zResurrection.inline.hpp"
  #include "oops/oop.hpp"
  #include "runtime/atomic.hpp"

  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
  inline oop ZBarrier::barrier(volatile oop* p, oop o) {
    uintptr_t addr = ZOop::to_address(o);

    if (fast_path(addr)) {
      return ZOop::from_address(addr);
    }

!   uintptr_t good_addr = slow_path(addr);
!   const oop result = ZOop::from_address(good_addr);
!
!   // Self heal, but only if the address was actually updated by the slow path,
!   // which might not be the case, e.g. when marking through an already good oop.
!   while (p != NULL && good_addr != addr) {
!     const uintptr_t prev_addr = Atomic::cmpxchg(good_addr, (volatile uintptr_t*)p, addr);
!     if (prev_addr != addr) {
!       // Some other thread overwrote the oop. If this oop was updated by a
!       // weak barrier the new oop might not be good, in which case we need
!       // to re-apply this barrier.
!       addr = prev_addr;
!       // Fast path
!       if (fast_path(addr)) {
!         break;
!       }
!       // Slow path
!       good_addr = slow_path(addr);
!     }
    }

!   return result;
  }

  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
  inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
    const uintptr_t addr = ZOop::to_address(o);

    if (fast_path(addr)) {
      // Return the good address instead of the weak good address
      // to ensure that the currently active heap view is used.
      return ZOop::from_address(ZAddress::good_or_null(addr));
    }

    const uintptr_t good_addr = slow_path(addr);
-   const oop result = ZOop::from_address(good_addr);

!   // Self heal unless the address returned from the slow path is null,
!   // in which case resurrection was blocked and we must let the reference
!   // processor clear the oop. Mutators are not allowed to clear oops in
!   // these cases, since that would be similar to calling Reference.clear(),
!   // which would make the reference non-discoverable or silently dropped
!   // by the reference processor.
!   if (p != NULL && good_addr != 0) {
!     // The slow path returns a good/marked address, but we never mark oops
!     // in a weak load barrier so we always self heal with the remapped address.
!     const uintptr_t weak_good_addr = ZAddress::remapped(good_addr);
!     Atomic::cmpxchg(weak_good_addr, (volatile uintptr_t*)p, addr);
    }

!   return result;
  }

  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
  inline void ZBarrier::root_barrier(oop* p, oop o) {
    const uintptr_t addr = ZOop::to_address(o);
--- 30,115 ----
  #include "gc/z/zOop.inline.hpp"
  #include "gc/z/zResurrection.inline.hpp"
  #include "oops/oop.hpp"
  #include "runtime/atomic.hpp"

+ inline void ZBarrier::self_heal(volatile oop* p, uintptr_t addr, uintptr_t heal_addr) {
+   if (heal_addr == 0) {
+     // Never heal with null since it interacts badly with reference processing.
+     // A mutator clearing an oop would be similar to calling Reference.clear(),
+     // which would make the reference non-discoverable or silently dropped
+     // by the reference processor.
+     return;
+   }
+
+   for (;;) {
+     if (addr == heal_addr) {
+       // Already healed
+       return;
+     }
+
+     // Heal
+     const uintptr_t prev_addr = Atomic::cmpxchg(heal_addr, (volatile uintptr_t*)p, addr);
+     if (prev_addr == addr) {
+       // Success
+       return;
+     }
+
+     if (ZAddress::is_good_or_null(prev_addr)) {
+       // No need to heal
+       return;
+     }
+
+     // The oop location was healed by another barrier, but it is still not
+     // good or null. Re-apply healing to make sure the oop is not left with
+     // weaker (remapped or finalizable) metadata bits than what this barrier
+     // tried to apply.
+     assert(ZAddress::offset(prev_addr) == ZAddress::offset(heal_addr), "Invalid offset");
+     addr = prev_addr;
+   }
+ }
+
  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
  inline oop ZBarrier::barrier(volatile oop* p, oop o) {
    uintptr_t addr = ZOop::to_address(o);

+   // Fast path
    if (fast_path(addr)) {
      return ZOop::from_address(addr);
    }

!   // Slow path
!   const uintptr_t good_addr = slow_path(addr);
!   if (p != NULL) {
!     self_heal(p, addr, good_addr);
    }

!   return ZOop::from_address(good_addr);
  }

  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
  inline oop ZBarrier::weak_barrier(volatile oop* p, oop o) {
    const uintptr_t addr = ZOop::to_address(o);

+   // Fast path
    if (fast_path(addr)) {
      // Return the good address instead of the weak good address
      // to ensure that the currently active heap view is used.
      return ZOop::from_address(ZAddress::good_or_null(addr));
    }

+   // Slow path
    const uintptr_t good_addr = slow_path(addr);

!   if (p != NULL) {
!     // The slow path returns a good/marked address or null, but we never mark
!     // oops in a weak load barrier so we always heal with the remapped address.
!     self_heal(p, addr, ZAddress::remapped_or_null(good_addr));
    }

!   return ZOop::from_address(good_addr);
  }

  template <ZBarrierFastPath fast_path, ZBarrierSlowPath slow_path>
  inline void ZBarrier::root_barrier(oop* p, oop o) {
    const uintptr_t addr = ZOop::to_address(o);

*** 164,182 ****
  inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
    verify_on_weak(p);

    if (ZResurrection::is_blocked()) {
!     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
    }

    return load_barrier_on_oop_field_preloaded(p, o);
  }

  inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
    if (ZResurrection::is_blocked()) {
!     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
    }

    return load_barrier_on_oop_field_preloaded(p, o);
  }
--- 180,198 ----
  inline oop ZBarrier::load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
    verify_on_weak(p);

    if (ZResurrection::is_blocked()) {
!     return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
    }

    return load_barrier_on_oop_field_preloaded(p, o);
  }

  inline oop ZBarrier::load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
    if (ZResurrection::is_blocked()) {
!     return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
    }

    return load_barrier_on_oop_field_preloaded(p, o);
  }

*** 209,219 ****
  inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
    verify_on_weak(p);

    if (ZResurrection::is_blocked()) {
!     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
    }

    return weak_load_barrier_on_oop_field_preloaded(p, o);
  }
--- 225,235 ----
  inline oop ZBarrier::weak_load_barrier_on_weak_oop_field_preloaded(volatile oop* p, oop o) {
    verify_on_weak(p);

    if (ZResurrection::is_blocked()) {
!     return barrier<is_good_or_null_fast_path, weak_load_barrier_on_weak_oop_slow_path>(p, o);
    }

    return weak_load_barrier_on_oop_field_preloaded(p, o);
  }

*** 226,236 ****
    return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
  }

  inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
    if (ZResurrection::is_blocked()) {
!     return weak_barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
    }

    return weak_load_barrier_on_oop_field_preloaded(p, o);
  }
--- 242,252 ----
    return weak_load_barrier_on_phantom_oop_field_preloaded(p, o);
  }

  inline oop ZBarrier::weak_load_barrier_on_phantom_oop_field_preloaded(volatile oop* p, oop o) {
    if (ZResurrection::is_blocked()) {
!     return barrier<is_good_or_null_fast_path, weak_load_barrier_on_phantom_oop_slow_path>(p, o);
    }

    return weak_load_barrier_on_oop_field_preloaded(p, o);
  }
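
For readers skimming the new self_heal() retry loop above: it is a compare-and-swap retry pattern that keeps re-applying the healed address until the slot holds a good (or null) value. The following standalone sketch illustrates the same shape of loop only; it is not HotSpot code. It uses std::atomic instead of HotSpot's Atomic::cmpxchg, and is_good_or_null() with a single "good" bit is an assumed placeholder for the real ZAddress metadata checks.

  #include <atomic>
  #include <cstdint>

  // Assumed placeholder for ZAddress::is_good_or_null(); treats bit 0 as a
  // hypothetical "good" metadata bit.
  static bool is_good_or_null(uintptr_t addr) {
    return addr == 0 || (addr & 1) != 0;
  }

  // CAS retry loop in the spirit of ZBarrier::self_heal(): keep trying to
  // install heal_addr until the slot holds a good (or null) value.
  static void self_heal_sketch(std::atomic<uintptr_t>* slot,
                               uintptr_t expected,
                               uintptr_t heal_addr) {
    if (heal_addr == 0) {
      // Mirrors the patch: never heal with null, to avoid interfering with
      // reference processing.
      return;
    }

    for (;;) {
      if (expected == heal_addr) {
        return; // Already healed
      }

      if (slot->compare_exchange_strong(expected, heal_addr)) {
        return; // We installed the healed address
      }

      // On failure, compare_exchange_strong stores the currently observed
      // value back into 'expected'.
      if (is_good_or_null(expected)) {
        return; // Another barrier already healed it with a good value
      }

      // Otherwise retry with the newly observed value.
    }
  }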