/*
 * Copyright (c) 2017, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zAddressRangeMap.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "memory/iterator.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"
// Bitmap covering a single ZPageSizeMin-sized heap region, used to
// track which objects have already been pushed onto the visit stack.
class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
private:
  CHeapBitMap _map;

public:
  ZHeapIteratorBitMap(size_t size_in_bits) :
      _map(size_in_bits) {}

  // Returns true if the bit was clear and has now been set,
  // false if it was already set.
  bool try_set_bit(size_t index) {
    if (_map.at(index)) {
      return false;
    }

    _map.set_bit(index);
    return true;
  }
};
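
// A minimal usage sketch (illustrative only, not part of this file's
// logic): the first try_set_bit() for a given index succeeds, any
// repeat for the same index fails, which is what makes the bitmap
// usable for visit deduplication.
//
//   ZHeapIteratorBitMap map(1024);
//   map.try_set_bit(42);  // true, bit was clear
//   map.try_set_bit(42);  // false, already set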

class ZHeapIteratorRootOopClosure : public ZRootsIteratorClosure {
private:
  ZHeapIterator* const _iter;

public:
  ZHeapIteratorRootOopClosure(ZHeapIterator* iter) :
      _iter(iter) {}

  virtual void do_oop(oop* p) {
    // Load barrier needed here, even on non-concurrent strong roots,
    // for the same reason we need fixup_partial_loads() in ZHeap::mark_end().
    const oop obj = NativeAccess<AS_NO_KEEPALIVE>::oop_load(p);
    _iter->push(obj);
  }

  virtual void do_oop(narrowOop* p) {
    // ZGC does not use compressed oops
    ShouldNotReachHere();
  }
};

class ZHeapIteratorOopClosure : public BasicOopIterateClosure {
private:
  ZHeapIterator* const _iter;
  const oop            _base;
  const bool           _visit_referents;

  oop load_oop(oop* p) const {
    if (_visit_referents) {
      // The field might be a Reference referent, whose strength is not
      // known statically, so let the access layer resolve it from _base.
      return HeapAccess<ON_UNKNOWN_OOP_REF | AS_NO_KEEPALIVE>::oop_load_at(_base, _base->field_offset(p));
    } else {
      return HeapAccess<AS_NO_KEEPALIVE>::oop_load(p);
    }
  }

public:
  ZHeapIteratorOopClosure(ZHeapIterator* iter, oop base, bool visit_referents) :
      _iter(iter),
      _base(base),
      _visit_referents(visit_referents) {}

  virtual ReferenceIterationMode reference_iteration_mode() {
    return _visit_referents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
  }

  virtual void do_oop(oop* p) {
    const oop obj = load_oop(p);
    _iter->push(obj);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }

#ifdef ASSERT
  virtual bool should_verify_oops() {
    return false;
  }
#endif
};

ZHeapIterator::ZHeapIterator(bool visit_referents) :
    _visit_stack(),
    _visit_map(),
    _visit_referents(visit_referents) {}

ZHeapIterator::~ZHeapIterator() {
  ZVisitMapIterator iter(&_visit_map);
  for (ZHeapIteratorBitMap* map; iter.next(&map);) {
    delete map;
  }
}

static size_t object_index_max() {
  return ZPageSizeMin >> ZObjectAlignmentSmallShift;
}

static size_t object_index(oop obj) {
  const uintptr_t addr = ZOop::to_address(obj);
  const uintptr_t offset = ZAddress::offset(addr);
  // Use ZPageSizeMin directly, consistent with object_index_max()
  // above, and avoiding an int-width shift.
  const uintptr_t mask = ZPageSizeMin - 1;
  return (offset & mask) >> ZObjectAlignmentSmallShift;
}
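
// Worked example, assuming the default ZGC configuration where
// ZPageSizeMin is 2M (ZPageSizeMinShift == 21) and small objects are
// 8-byte aligned (ZObjectAlignmentSmallShift == 3):
//
//   object_index_max() == 2M >> 3 == 262144 bits per bitmap
//
//   For an object at heap offset 0x180030:
//     mask  == 0x1FFFFF
//     index == (0x180030 & 0x1FFFFF) >> 3 == 0x30006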

ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {
  const uintptr_t addr = ZOop::to_address(obj);
  ZHeapIteratorBitMap* map = _visit_map.get(addr);
  if (map == NULL) {
    // Lazily allocate the bitmap for the region containing this object
    map = new ZHeapIteratorBitMap(object_index_max());
    _visit_map.put(addr, map);
  }

  return map;
}

void ZHeapIterator::push(oop obj) {
  if (obj == NULL) {
    // Ignore
    return;
  }

  ZHeapIteratorBitMap* const map = object_map(obj);
  const size_t index = object_index(obj);
  if (!map->try_set_bit(index)) {
    // Already pushed
    return;
  }

  // Push
  _visit_stack.push(obj);
}

void ZHeapIterator::objects_do(ObjectClosure* cl) {
  // Note that the heap iterator visits all reachable objects, including
  // objects that might be unreachable from the application, such as a
  // not yet cleared JNIWeakGlobalRef. Also note that visiting the JVMTI
  // tag map is a requirement to make sure we visit all tagged objects,
  // even those that might have become phantom reachable by now. If we
  // skipped them, tagged objects would disappear from heap iterations
  // before the application has received the ObjectFree events it
  // expects for them.

  ZHeapIteratorRootOopClosure root_cl(this);

  // Push strong roots onto stack
  {
    ZRootsIterator roots;
    roots.oops_do(&root_cl);
  }

  {
    ZConcurrentRootsIterator roots;
    roots.oops_do(&root_cl);
  }

  // Push weak roots onto stack
  {
    ZWeakRootsIterator roots;
    roots.oops_do(&root_cl);
  }

  {
    ZConcurrentWeakRootsIterator roots;
    roots.oops_do(&root_cl);
  }

  // Drain stack
  while (!_visit_stack.is_empty()) {
    const oop obj = _visit_stack.pop();

    // Visit
    cl->do_object(obj);

    // Push members to visit
    ZHeapIteratorOopClosure push_cl(this, obj, _visit_referents);
    obj->oop_iterate(&push_cl);
  }
}
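
// A minimal usage sketch (ZHeap::object_iterate() is the real call
// site; CountObjectsClosure here is a hypothetical ObjectClosure,
// shown only for illustration):
//
//   class CountObjectsClosure : public ObjectClosure {
//   public:
//     size_t _count;
//     CountObjectsClosure() : _count(0) {}
//     virtual void do_object(oop obj) { _count++; }
//   };
//
//   CountObjectsClosure cl;
//   ZHeapIterator iter(true /* visit_referents */);
//   iter.objects_do(&cl);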