/*
 * Copyright (c) 2017, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zGranuleMap.inline.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "gc/z/zStat.hpp"
#include "memory/iterator.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"

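// Per-granule set of already-visited objects. ZGC places objects in
// fixed-size granules (ZGranuleSize bytes) and aligns them to at least
// the small object alignment, so one bit per possible object start
// position is enough. try_set_bit() returns false if the bit was
// already set, which is how push() deduplicates objects.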
class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
private:
  CHeapBitMap _map;

public:
  ZHeapIteratorBitMap(size_t size_in_bits) :
      _map(size_in_bits) {}

  bool try_set_bit(size_t index) {
    if (_map.at(index)) {
      return false;
    }

    _map.set_bit(index);
    return true;
  }
};

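// Closure applied to root oops. Each root is loaded through a load
// barrier and pushed onto the iterator's visit stack.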
class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
private:
  ZHeapIterator* const _iter;

public:
  ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
      _iter(iter) {}

  virtual void do_oop(oop* p) {
    // Load barrier needed here, even on non-concurrent strong roots,
    // for the same reason we need fixup_partial_loads() in ZHeap::mark_end().
    const oop obj = NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
    _iter->push(obj);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

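// Closure applied to the oop fields of an object popped from the
// visit stack. Every field is loaded through a heap access barrier
// and the loaded oop is pushed for later visiting.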
class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
private:
  ZHeapIterator* const _iter;
  const oop            _base;
  const bool           _visit_referents;

  oop load_oop(oop* p) const {
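    // A Reference object's referent field cannot be loaded as a plain
    // strong reference. ON_UNKNOWN_OOP_REF makes the access layer
    // determine the reference strength from the base object and field
    // offset, and apply the matching barrier.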
    if (_visit_referents) {
      return HeapAccess<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>::oop_load_at(_base, _base->field_offset(p));
    } else {
      return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
    }
  }

public:
  ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base, bool visit_referents) :
      _iter(iter),
      _base(base),
      _visit_referents(visit_referents) {}

  virtual ReferenceIterationMode reference_iteration_mode() {
    return _visit_referents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
  }

  virtual void do_oop(oop* p) {
    const oop obj = load_oop(p);
    _iter->push(obj);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }

#ifdef ASSERT
  virtual bool should_verify_oops() {
    return false;
  }
#endif
};

ZHeapIterator::ZHeapIterator(bool visit_referents) :
    _visit_stack(),
    _visit_map(),
    _visit_referents(visit_referents) {}

ZHeapIterator::~ZHeapIterator() {
  ZVisitMapIterator iter(&_visit_map);
  for (ZHeapIteratorBitMap* map; iter.next(&map);) {
    delete map;
  }
}

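// The per-granule bitmap needs one bit per possible object start
// position. Objects are aligned to at least the small object
// alignment, so a granule of ZGranuleSize bytes holds at most
// ZGranuleSize >> ZObjectAlignmentSmallShift objects.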
static size_t object_index_max() {
  return ZGranuleSize >> ZObjectAlignmentSmallShift;
}

static size_t object_index(oop obj) {
  const uintptr_t addr = ZOop::to_address(obj);
  const uintptr_t offset = ZAddress::offset(addr);
  const uintptr_t mask = ZGranuleSize - 1;
  return (offset & mask) >> ZObjectAlignmentSmallShift;
}

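// Returns the visit bitmap covering the granule containing the given
// object, lazily allocating it on first use. The bitmaps are owned by
// the iterator and freed by its destructor.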
ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {
  const uintptr_t addr = ZOop::to_address(obj);
  ZHeapIteratorBitMap* map = _visit_map.get(addr);
  if (map == NULL) {
    map = new ZHeapIteratorBitMap(object_index_max());
    _visit_map.put(addr, map);
  }

  return map;
}

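// Pushes an object onto the visit stack, unless it is NULL or its bit
// in the per-granule bitmap is already set, which means the object has
// been pushed before.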
void ZHeapIterator::push(oop obj) {
  if (obj == NULL) {
    // Ignore
    return;
  }

  ZHeapIteratorBitMap* const map = object_map(obj);
  const size_t index = object_index(obj);
  if (!map->try_set_bit(index)) {
    // Already pushed
    return;
  }

  // Push
  _visit_stack.push(obj);
}

void ZHeapIterator::objects_do(ObjectClosure* cl) {
  // Note that the heap iterator visits all reachable objects, including
  // objects that might be unreachable from the application, such as a
  // not yet cleared JNIWeakGlobalRef. Also note that visiting the JVMTI
  // tag map is required to make sure we visit all tagged objects, even
  // those that might have become phantom reachable. If we didn't visit
  // the tag map, the application would have expected to see ObjectFree
  // events for phantom reachable objects still present in the tag map.

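  // Heap iteration is a diagnostic walk, not a GC phase; disable ZStat
  // timers so it is not accounted in the GC statistics.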
  ZStatTimerDisable disable;
  ZHeapIteratorRootOopClosure root_cl(this);

  // Push strong roots onto stack
  {
    ZRootsIterator roots;
    roots.oops_do(&root_cl);
  }

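  // Push concurrent strong roots onto stack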
  {
    ZConcurrentRootsIterator roots;
    roots.oops_do(&root_cl);
  }

  // Push weak roots onto stack
  {
    ZWeakRootsIterator roots;
    roots.oops_do(&root_cl);
  }

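  // Push concurrent weak roots onto stack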
  {
    ZConcurrentWeakRootsIterator roots;
    roots.oops_do(&root_cl);
  }

  // Drain stack
  while (!_visit_stack.is_empty()) {
    const oop obj = _visit_stack.pop();

    // Visit
    cl->do_object(obj);

    // Push members to visit
    ZHeapIteratorOopClosure push_cl(this, obj, _visit_referents);
    obj->oop_iterate(&push_cl);
  }
}