--- old/src/hotspot/share/gc/z/zHeap.cpp 2019-06-04 12:49:28.986930658 +0200
+++ new/src/hotspot/share/gc/z/zHeap.cpp 2019-06-04 12:49:28.786924237 +0200
@@ -508,8 +508,8 @@
 void ZHeap::object_iterate(ObjectClosure* cl, bool visit_referents) {
   assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");
 
-  ZHeapIterator iter(visit_referents);
-  iter.objects_do(cl);
+  ZHeapIterator iter;
+  iter.objects_do(cl, visit_referents);
 }
 
 void ZHeap::serviceability_initialize() {
--- old/src/hotspot/share/gc/z/zHeapIterator.cpp 2019-06-04 12:49:29.337941929 +0200
+++ new/src/hotspot/share/gc/z/zHeapIterator.cpp 2019-06-04 12:49:29.090933998 +0200
@@ -51,18 +51,29 @@
   }
 };
 
+template <bool Concurrent, bool Weak>
 class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
 private:
   ZHeapIterator* const _iter;
 
+  oop load_oop(oop* p) {
+    if (Weak) {
+      return NativeAccess<AS_NO_KEEPALIVE | ON_PHANTOM_OOP_REF>::oop_load(p);
+    }
+
+    if (Concurrent) {
+      return NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
+    }
+
+    return RawAccess<>::oop_load(p);
+  }
+
 public:
   ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
       _iter(iter) {}
 
   virtual void do_oop(oop* p) {
-    // Load barrier needed here, even on non-concurrent strong roots,
-    // for the same reason we need fixup_partial_loads() in ZHeap::mark_end().
-    const oop obj = NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
+    const oop obj = load_oop(p);
     _iter->push(obj);
   }
 
@@ -71,28 +82,27 @@
   }
 };
 
+template <bool VisitReferents>
 class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
 private:
   ZHeapIterator* const _iter;
   const oop            _base;
-  const bool           _visit_referents;
 
-  oop load_oop(oop* p) const {
-    if (_visit_referents) {
-      return HeapAccess<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>::oop_load_at(_base, _base->field_offset(p));
-    } else {
-      return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
+  oop load_oop(oop* p) {
+    if (VisitReferents) {
+      return HeapAccess<AS_NO_KEEPALIVE | ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
     }
+
+    return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
   }
 
 public:
-  ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base, bool visit_referents) :
+  ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base) :
       _iter(iter),
-      _base(base),
-      _visit_referents(visit_referents) {}
+      _base(base) {}
 
   virtual ReferenceIterationMode reference_iteration_mode() {
-    return _visit_referents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
+    return VisitReferents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
   }
 
   virtual void do_oop(oop* p) {
@@ -111,10 +121,9 @@
 #endif
 };
 
-ZHeapIterator::ZHeapIterator(bool visit_referents) :
+ZHeapIterator::ZHeapIterator() :
     _visit_stack(),
-    _visit_map(),
-    _visit_referents(visit_referents) {}
+    _visit_map() {}
 
 ZHeapIterator::~ZHeapIterator() {
   ZVisitMapIterator iter(&_visit_map);
@@ -162,49 +171,45 @@
   _visit_stack.push(obj);
 }
 
-void ZHeapIterator::objects_do(ObjectClosure* cl) {
-  // Note that the heap iterator visits all reachable objects, including
-  // objects that might be unreachable from the application, such as a
-  // not yet cleared JNIWeakGloablRef. However, also note that visiting
-  // the JVMTI tag map is a requirement to make sure we visit all tagged
-  // objects, even those that might now have become phantom reachable.
-  // If we didn't do this the application would have expected to see
-  // ObjectFree events for phantom reachable objects in the tag map.
-
-  ZStatTimerDisable disable;
-  ZHeapIteratorRootOopClosure root_cl(this);
-
-  // Push strong roots onto stack
-  {
-    ZRootsIterator roots;
-    roots.oops_do(&root_cl);
-  }
+template <typename RootsIterator, bool Concurrent, bool Weak>
+void ZHeapIterator::push_roots() {
+  ZHeapIteratorRootOopClosure<Concurrent, Weak> cl(this);
+  RootsIterator roots;
+  roots.oops_do(&cl);
+}
 
-  {
-    ZConcurrentRootsIterator roots;
-    roots.oops_do(&root_cl);
-  }
+template <bool VisitReferents>
+void ZHeapIterator::push_fields(oop obj) {
+  ZHeapIteratorOopClosure<VisitReferents> cl(this, obj);
+  obj->oop_iterate(&cl);
+}
 
-  // Push weak roots onto stack
-  {
-    ZWeakRootsIterator roots;
-    roots.oops_do(&root_cl);
-  }
+template <bool VisitReferents>
+void ZHeapIterator::objects_do(ObjectClosure* cl) {
+  ZStatTimerDisable disable;
 
-  {
-    ZConcurrentWeakRootsIterator roots;
-    roots.oops_do(&root_cl);
-  }
+  // Push roots to visit
+  push_roots<ZRootsIterator,               false /* Concurrent */, false /* Weak */>();
+  push_roots<ZConcurrentRootsIterator,     true  /* Concurrent */, false /* Weak */>();
+  push_roots<ZWeakRootsIterator,           false /* Concurrent */, true  /* Weak */>();
+  push_roots<ZConcurrentWeakRootsIterator, true  /* Concurrent */, true  /* Weak */>();
 
   // Drain stack
   while (!_visit_stack.is_empty()) {
     const oop obj = _visit_stack.pop();
 
-    // Visit
+    // Visit object
     cl->do_object(obj);
 
-    // Push members to visit
-    ZHeapIteratorOopClosure push_cl(this, obj, _visit_referents);
-    obj->oop_iterate(&push_cl);
+    // Push fields to visit
+    push_fields<VisitReferents>(obj);
+  }
+}
+
+void ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_referents) {
+  if (visit_referents) {
+    objects_do<true /* VisitReferents */>(cl);
+  } else {
+    objects_do<false /* VisitReferents */>(cl);
   }
 }
--- old/src/hotspot/share/gc/z/zHeapIterator.hpp 2019-06-04 12:49:29.690953263 +0200
+++ new/src/hotspot/share/gc/z/zHeapIterator.hpp 2019-06-04 12:49:29.441945268 +0200
@@ -32,8 +32,8 @@
 class ZHeapIteratorBitMap;
 
 class ZHeapIterator : public StackObj {
-  friend class ZHeapIteratorRootOopClosure;
-  friend class ZHeapIteratorOopClosure;
+  template <bool Concurrent, bool Weak> friend class ZHeapIteratorRootOopClosure;
+  template <bool VisitReferents> friend class ZHeapIteratorOopClosure;
 
 private:
   typedef ZGranuleMap<ZHeapIteratorBitMap*> ZVisitMap;
@@ -42,16 +42,19 @@
 
   ZVisitStack _visit_stack;
   ZVisitMap   _visit_map;
-  const bool  _visit_referents;
 
   ZHeapIteratorBitMap* object_map(oop obj);
   void push(oop obj);
 
+  template <typename RootsIterator, bool Concurrent, bool Weak> void push_roots();
+  template <bool VisitReferents> void push_fields(oop obj);
+  template <bool VisitReferents> void objects_do(ObjectClosure* cl);
+
 public:
-  ZHeapIterator(bool visit_referents);
+  ZHeapIterator();
   ~ZHeapIterator();
 
-  void objects_do(ObjectClosure* cl);
+  void objects_do(ObjectClosure* cl, bool visit_referents);
 };
 
 #endif // SHARE_GC_Z_ZHEAPITERATOR_HPP
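
Note on the pattern used above: the patch replaces ZHeapIterator's runtime _visit_referents flag with the VisitReferents template parameter, and encodes the Concurrent/Weak root distinctions the same way, so each instantiation of load_oop() selects its access decorators at compile time instead of branching per oop. The old public interface is preserved by the non-template objects_do(ObjectClosure*, bool), which branches once on the runtime flag and dispatches to the matching instantiation. The following is a minimal standalone C++ sketch of that runtime-to-compile-time dispatch; the names (sum, Verbose) are illustrative only and not part of the JDK code.

#include <cstdio>
#include <cstddef>

// A bool template parameter stands in for a per-call runtime flag.
// The test on Verbose is a compile-time constant in each instantiation,
// so the compiler folds the branch away, as with VisitReferents above.
template <bool Verbose>
static long sum(const long* p, size_t n) {
  long total = 0;
  for (size_t i = 0; i < n; i++) {
    if (Verbose) {
      std::printf("element %zu = %ld\n", i, p[i]);
    }
    total += p[i];
  }
  return total;
}

// Thin non-template wrapper: branch once on the runtime flag and
// dispatch to the matching instantiation, mirroring
// ZHeapIterator::objects_do(ObjectClosure* cl, bool visit_referents).
static long sum(const long* p, size_t n, bool verbose) {
  return verbose ? sum<true>(p, n) : sum<false>(p, n);
}

int main() {
  const long values[] = {1, 2, 3};
  return (sum(values, 3, true) == 6) ? 0 : 1;
}

The usual trade-off applies: each bool parameter doubles the number of instantiations (the patch generates two copies of objects_do and four root-closure variants), in exchange for removing a per-oop runtime test on a hot iteration path.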