1 /*
  2  * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
  3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
  4  *
  5  * This code is free software; you can redistribute it and/or modify it
  6  * under the terms of the GNU General Public License version 2 only, as
  7  * published by the Free Software Foundation.
  8  *
  9  * This code is distributed in the hope that it will be useful, but WITHOUT
 10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 12  * version 2 for more details (a copy is included in the LICENSE file that
 13  * accompanied this code).
 14  *
 15  * You should have received a copy of the GNU General Public License version
 16  * 2 along with this work; if not, write to the Free Software Foundation,
 17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 18  *
 19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 20  * or visit www.oracle.com if you need additional information or have any
 21  * questions.
 22  */
 23 
 24 #include "precompiled.hpp"
 25 #include "gc/z/zAddressRangeMap.inline.hpp"
 26 #include "gc/z/zBarrier.inline.hpp"
 27 #include "gc/z/zGlobals.hpp"
 28 #include "gc/z/zHeapIterator.hpp"
 29 #include "gc/z/zOop.inline.hpp"
 30 #include "gc/z/zRootsIterator.hpp"
 31 #include "memory/iterator.inline.hpp"
 32 #include "oops/oop.inline.hpp"
 33 #include "utilities/bitMap.inline.hpp"
 34 #include "utilities/stack.inline.hpp"
 35 
 36 class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
 37 private:
 38   CHeapBitMap _map;
 39 
 40 public:
 41   ZHeapIteratorBitMap(size_t size_in_bits) :
 42       _map(size_in_bits) {}
 43 
 44   bool try_set_bit(size_t index) {
 45     if (_map.at(index)) {
 46       return false;
 47     }
 48 
 49     _map.set_bit(index);
 50     return true;
 51   }
 52 };
 53 
 54 class ZHeapIteratorRootOopClosure : public OopClosure {
 55 private:
 56   ZHeapIterator* const _iter;
 57   ObjectClosure* const _cl;
 58 
 59 public:
 60   ZHeapIteratorRootOopClosure(ZHeapIterator* iter, ObjectClosure* cl) :
 61       _iter(iter),
 62       _cl(cl) {}
 63 
 64   virtual void do_oop(oop* p) {
 65     // Load barrier needed here for the same reason we
 66     // need fixup_partial_loads() in ZHeap::mark_end()
 67     const oop obj = ZBarrier::load_barrier_on_oop_field(p);
 68     _iter->push(obj);
 69     _iter->drain(_cl);
 70   }
 71 
 72   virtual void do_oop(narrowOop* p) {
 73     ShouldNotReachHere();
 74   }
 75 };
 76 
 77 class ZHeapIteratorPushOopClosure : public BasicOopIterateClosure {
 78 private:
 79   ZHeapIterator* const _iter;
 80   const oop            _base;
 81   const bool           _visit_referents;
 82 
 83 public:
 84   ZHeapIteratorPushOopClosure(ZHeapIterator* iter, oop base) :
 85       _iter(iter),
 86       _base(base),
 87       _visit_referents(iter->visit_referents()) {}
 88 
 89   oop load_oop(oop* p) {
 90     if (_visit_referents) {
 91       return HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
 92     } else {
 93       return HeapAccess<>::oop_load(p);
 94     }
 95   }
 96 
 97   virtual ReferenceIterationMode reference_iteration_mode() {
 98     return _visit_referents ? DO_FIELDS : DO_FIELDS_EXCEPT_REFERENT;
 99   }
100 
101   virtual void do_oop(oop* p) {
102     const oop obj = load_oop(p);
103     _iter->push(obj);
104   }
105 
106   virtual void do_oop(narrowOop* p) {
107     ShouldNotReachHere();
108   }
109 
110 #ifdef ASSERT
111   virtual bool should_verify_oops() {
112     return false;
113   }
114 #endif
115 };
116 
// visit_referents controls whether the referent field of reference
// objects is traversed as if it were a normal field (see
// ZHeapIteratorPushOopClosure::reference_iteration_mode()).
ZHeapIterator::ZHeapIterator(bool visit_referents) :
    _visit_stack(),
    _visit_map(),
    _visit_referents(visit_referents) {}
121 
122 ZHeapIterator::~ZHeapIterator() {
123   ZVisitMapIterator iter(&_visit_map);
124   for (ZHeapIteratorBitMap* map; iter.next(&map);) {
125     delete map;
126   }
127 }
128 
129 size_t ZHeapIterator::object_index_max() const {
130   return ZPageSizeMin >> ZObjectAlignmentSmallShift;
131 }
132 
133 size_t ZHeapIterator::object_index(oop obj) const {
134   const uintptr_t addr = ZOop::to_address(obj);
135   const uintptr_t offset = ZAddress::offset(addr);
136   const uintptr_t mask = (1 << ZPageSizeMinShift) - 1;
137   return (offset & mask) >> ZObjectAlignmentSmallShift;
138 }
139 
140 ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {
141   const uintptr_t addr = ZOop::to_address(obj);
142   ZHeapIteratorBitMap* map = _visit_map.get(addr);
143   if (map == NULL) {
144     map = new ZHeapIteratorBitMap(object_index_max());
145     _visit_map.put(addr, map);
146   }
147 
148   return map;
149 }
150 
151 void ZHeapIterator::push(oop obj) {
152   if (obj == NULL) {
153     // Ignore
154     return;
155   }
156 
157   ZHeapIteratorBitMap* const map = object_map(obj);
158   const size_t index = object_index(obj);
159   if (!map->try_set_bit(index)) {
160     // Already pushed
161     return;
162   }
163 
164   // Push
165   _visit_stack.push(obj);
166 }
167 
168 void ZHeapIterator::drain(ObjectClosure* cl) {
169   while (!_visit_stack.is_empty()) {
170     const oop obj = _visit_stack.pop();
171 
172     // Visit
173     cl->do_object(obj);
174 
175     // Push members to visit
176     ZHeapIteratorPushOopClosure push_cl(this, obj);
177     obj->oop_iterate(&push_cl);
178   }
179 }
180 
// Whether the referent field of reference objects is visited as a
// normal field (value supplied at construction).
bool ZHeapIterator::visit_referents() const {
  return _visit_referents;
}
184 
185 void ZHeapIterator::objects_do(ObjectClosure* cl) {
186   ZHeapIteratorRootOopClosure root_cl(this, cl);
187   ZRootsIterator roots;
188 
189   // Follow roots. Note that we also visit the JVMTI weak tag map
190   // as if they were strong roots to make sure we visit all tagged
191   // objects, even those that might now have become unreachable.
192   // If we didn't do this the user would have expected to see
193   // ObjectFree events for unreachable objects in the tag map.
194   roots.oops_do(&root_cl, true /* visit_jvmti_weak_export */);
195 }