--- /dev/null	2018-04-03 12:55:20.301839954 +0200
+++ new/src/hotspot/share/gc/z/zHeapIterator.cpp	2018-06-08 19:46:10.119283452 +0200
@@ -0,0 +1,183 @@
+/*
+ * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
+ * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
+ *
+ * This code is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 only, as
+ * published by the Free Software Foundation.
+ *
+ * This code is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
+ * version 2 for more details (a copy is included in the LICENSE file that
+ * accompanied this code).
+ *
+ * You should have received a copy of the GNU General Public License version
+ * 2 along with this work; if not, write to the Free Software Foundation,
+ * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
+ *
+ * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
+ * or visit www.oracle.com if you need additional information or have any
+ * questions.
+ */
+
+#include "precompiled.hpp"
+#include "gc/z/zAddressRangeMap.inline.hpp"
+#include "gc/z/zBarrier.inline.hpp"
+#include "gc/z/zGlobals.hpp"
+#include "gc/z/zHeapIterator.hpp"
+#include "gc/z/zOop.inline.hpp"
+#include "gc/z/zRootsIterator.hpp"
+#include "oops/oop.inline.hpp"
+#include "utilities/bitMap.inline.hpp"
+#include "utilities/stack.inline.hpp"
+
+class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
+private:
+  CHeapBitMap _map;
+
+public:
+  ZHeapIteratorBitMap(size_t size_in_bits) :
+      _map(size_in_bits) {}
+
+  bool try_set_bit(size_t index) {
+    if (_map.at(index)) {
+      return false;
+    }
+
+    _map.set_bit(index);
+    return true;
+  }
+};
+
+class ZHeapIteratorRootOopClosure : public OopClosure {
+private:
+  ZHeapIterator* const _iter;
+  ObjectClosure* const _cl;
+
+public:
+  ZHeapIteratorRootOopClosure(ZHeapIterator* iter, ObjectClosure* cl) :
+      _iter(iter),
+      _cl(cl) {}
+
+  virtual void do_oop(oop* p) {
+    // Load barrier needed here for the same reason we
+    // need fixup_partial_loads() in ZHeap::mark_end()
+    const oop obj = RootAccess<>::oop_load(p);
+    _iter->push(obj);
+    _iter->drain(_cl);
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    ShouldNotReachHere();
+  }
+};
+
+class ZHeapIteratorPushOopClosure : public ExtendedOopClosure {
+private:
+  ZHeapIterator* const _iter;
+  const oop            _base;
+
+public:
+  ZHeapIteratorPushOopClosure(ZHeapIterator* iter, oop base) :
+      _iter(iter),
+      _base(base) {}
+
+  void do_oop_nv(oop* p) {
+    const oop obj = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
+    _iter->push(obj);
+  }
+
+  void do_oop_nv(narrowOop* p) {
+    ShouldNotReachHere();
+  }
+
+  virtual void do_oop(oop* p) {
+    do_oop_nv(p);
+  }
+
+  virtual void do_oop(narrowOop* p) {
+    do_oop_nv(p);
+  }
+
+#ifdef ASSERT
+  virtual bool should_verify_oops() {
+    return false;
+  }
+#endif
+};
+
+ZHeapIterator::ZHeapIterator() :
+    _visit_stack(),
+    _visit_map() {}
+
+ZHeapIterator::~ZHeapIterator() {
+  ZVisitMapIterator iter(&_visit_map);
+  for (ZHeapIteratorBitMap* map; iter.next(&map);) {
+    delete map;
+  }
+}
+
+size_t ZHeapIterator::object_index_max() const {
+  return ZPageSizeMin >> ZObjectAlignmentSmallShift;
+}
+
+size_t ZHeapIterator::object_index(oop obj) const {
+  const uintptr_t addr = ZOop::to_address(obj);
+  const uintptr_t offset = ZAddress::offset(addr);
+  const uintptr_t mask = (1 << ZPageSizeMinShift) - 1;
+  return (offset & mask) >> ZObjectAlignmentSmallShift;
+}
+
+ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {
+  const uintptr_t addr = ZOop::to_address(obj);
+  ZHeapIteratorBitMap* map = _visit_map.get(addr);
+  if (map == NULL) {
+    map = new ZHeapIteratorBitMap(object_index_max());
+    _visit_map.put(addr, map);
+  }
+
+  return map;
+}
+
+void ZHeapIterator::push(oop obj) {
+  if (obj == NULL) {
+    // Ignore
+    return;
+  }
+
+  ZHeapIteratorBitMap* const map = object_map(obj);
+  const size_t index = object_index(obj);
+  if (!map->try_set_bit(index)) {
+    // Already pushed
+    return;
+  }
+
+  // Push
+  _visit_stack.push(obj);
+}
+
+void ZHeapIterator::drain(ObjectClosure* cl) {
+  while (!_visit_stack.is_empty()) {
+    const oop obj = _visit_stack.pop();
+
+    // Visit
+    cl->do_object(obj);
+
+    // Push members to visit
+    ZHeapIteratorPushOopClosure push_cl(this, obj);
+    obj->oop_iterate(&push_cl);
+  }
+}
+
+void ZHeapIterator::objects_do(ObjectClosure* cl) {
+  ZHeapIteratorRootOopClosure root_cl(this, cl);
+  ZRootsIterator roots;
+
+  // Follow roots. Note that we also visit the JVMTI weak tag map
+  // as if they were strong roots to make sure we visit all tagged
+  // objects, even those that might now have become unreachable.
+  // If we didn't do this the user would have expected to see
+  // ObjectFree events for unreachable objects in the tag map.
+  roots.oops_do(&root_cl, true /* visit_jvmti_weak_export */);
+}
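
For context, and not part of the patch itself: a minimal sketch of how the iterator added above would be driven. ZHeapIterator, ObjectClosure and SafepointSynchronize are real HotSpot types; the counting closure and the example function are hypothetical names invented for illustration. Assuming ZGC's usual constants (2M minimum page size, 8-byte small-object alignment), object_index_max() is 2^21 >> 3 = 2^18 bits, so each per-page visit bitmap occupies 32K of C-heap for the duration of the walk and is freed by the destructor.

#include "gc/z/zHeapIterator.hpp"
#include "memory/iterator.hpp"
#include "runtime/safepoint.hpp"

// Hypothetical closure, for illustration only: counts every live
// object reachable from the roots.
class ZCountObjectsClosure : public ObjectClosure {
private:
  size_t _count;

public:
  ZCountObjectsClosure() : _count(0) {}

  virtual void do_object(oop obj) {
    _count++;
  }

  size_t count() const {
    return _count;
  }
};

// Hypothetical caller. The iterator assumes a stable object graph,
// so it must run inside a safepoint (e.g. from a VM operation).
size_t count_live_objects_example() {
  assert(SafepointSynchronize::is_at_safepoint(), "Should be at safepoint");

  ZCountObjectsClosure cl;
  ZHeapIterator iter;
  iter.objects_do(&cl);  // Follow roots and drain the visit stack
  return cl.count();     // ~ZHeapIterator() frees the visit bitmaps
}

Note the push()/drain() split in the patch: the root closure drains after every root oop, keeping the visit stack shallow, while the field-pushing closure only pushes and leaves traversal order to the loop in drain().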