/*
 * Copyright (c) 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 */

#include "precompiled.hpp"
#include "gc/z/zAddressRangeMap.inline.hpp"
#include "gc/z/zBarrier.inline.hpp"
#include "gc/z/zGlobals.hpp"
#include "gc/z/zHeapIterator.hpp"
#include "gc/z/zOop.inline.hpp"
#include "gc/z/zRootsIterator.hpp"
#include "oops/oop.inline.hpp"
#include "utilities/bitMap.inline.hpp"
#include "utilities/stack.inline.hpp"

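// Per-address-range bitmap with one bit per potential object start.
// try_set_bit() returns true only the first time a given index is set,
// which is how the iterator avoids visiting the same object twice.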
class ZHeapIteratorBitMap : public CHeapObj<mtGC> {
private:
  CHeapBitMap _map;

public:
  ZHeapIteratorBitMap(size_t size_in_bits) :
      _map(size_in_bits) {}

  bool try_set_bit(size_t index) {
    if (_map.at(index)) {
      return false;
    }

    _map.set_bit(index);
    return true;
  }
};

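// Closure applied to root oops. Each root is healed with a load barrier,
// pushed onto the visit stack, and the stack is then drained so that the
// objects reachable from that root are visited.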
class ZHeapIteratorRootOopClosure : public OopClosure {
private:
  ZHeapIterator* const _iter;
  ObjectClosure* const _cl;

public:
  ZHeapIteratorRootOopClosure(ZHeapIterator* iter, ObjectClosure* cl) :
      _iter(iter),
      _cl(cl) {}

  virtual void do_oop(oop* p) {
    // Load barrier needed here for the same reason we
    // need fixup_partial_loads() in ZHeap::mark_end()
    const oop obj = ZBarrier::load_barrier_on_oop_field(p);
    _iter->push(obj);
    _iter->drain(_cl);
  }

  virtual void do_oop(narrowOop* p) {
    ShouldNotReachHere();
  }
};

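// Closure applied to the oop fields of a visited object. Loaded oops are
// pushed onto the visit stack for later visiting; no draining is done here.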
class ZHeapIteratorPushOopClosure : public ExtendedOopClosure {
private:
  ZHeapIterator* const _iter;
  const oop            _base;

public:
  ZHeapIteratorPushOopClosure(ZHeapIterator* iter, oop base) :
      _iter(iter),
      _base(base) {}

  void do_oop_nv(oop* p) {
    const oop obj = HeapAccess<ON_UNKNOWN_OOP_REF>::oop_load_at(_base, _base->field_offset(p));
    _iter->push(obj);
  }

  void do_oop_nv(narrowOop* p) {
    ShouldNotReachHere();
  }

  virtual void do_oop(oop* p) {
    do_oop_nv(p);
  }

  virtual void do_oop(narrowOop* p) {
    do_oop_nv(p);
  }

#ifdef ASSERT
  virtual bool should_verify_oops() {
    return false;
  }
#endif
};

ZHeapIterator::ZHeapIterator() :
    _visit_stack(),
    _visit_map() {}

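// Free the visit bitmaps that were allocated lazily during iteration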
ZHeapIterator::~ZHeapIterator() {
  ZVisitMapIterator iter(&_visit_map);
  for (ZHeapIteratorBitMap* map; iter.next(&map);) {
    delete map;
  }
}

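// Number of bits needed per visit bitmap, i.e. the maximum number of
// potential object starts in a ZPageSizeMin-sized address range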
size_t ZHeapIterator::object_index_max() const {
  return ZPageSizeMin >> ZObjectAlignmentSmallShift;
}

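// Bit index of an object: its offset within the surrounding
// ZPageSizeMin-sized address range, in units of the small object alignment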
size_t ZHeapIterator::object_index(oop obj) const {
  const uintptr_t addr = ZOop::to_address(obj);
  const uintptr_t offset = ZAddress::offset(addr);
  const uintptr_t mask = (1 << ZPageSizeMinShift) - 1;
  return (offset & mask) >> ZObjectAlignmentSmallShift;
}

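// Returns the visit bitmap covering the given object's address range,
// allocating it on first use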
ZHeapIteratorBitMap* ZHeapIterator::object_map(oop obj) {
  const uintptr_t addr = ZOop::to_address(obj);
  ZHeapIteratorBitMap* map = _visit_map.get(addr);
  if (map == NULL) {
    map = new ZHeapIteratorBitMap(object_index_max());
    _visit_map.put(addr, map);
  }

  return map;
}

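// Push an object onto the visit stack, unless it is NULL or has already
// been pushed (tracked via the visit bitmaps)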
void ZHeapIterator::push(oop obj) {
  if (obj == NULL) {
    // Ignore
    return;
  }

  ZHeapIteratorBitMap* const map = object_map(obj);
  const size_t index = object_index(obj);
  if (!map->try_set_bit(index)) {
    // Already pushed
    return;
  }

  // Push
  _visit_stack.push(obj);
}

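// Pop objects off the visit stack, apply the closure to each one, and
// push the objects referenced by its fields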
void ZHeapIterator::drain(ObjectClosure* cl) {
  while (!_visit_stack.is_empty()) {
    const oop obj = _visit_stack.pop();

    // Visit
    cl->do_object(obj);

    // Push members to visit
    ZHeapIteratorPushOopClosure push_cl(this, obj);
    obj->oop_iterate(&push_cl);
  }
}

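// Visit all objects reachable from the roots, using the visit stack and
// bitmaps to perform a depth-first traversal without revisiting objects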
void ZHeapIterator::objects_do(ObjectClosure* cl) {
  ZHeapIteratorRootOopClosure root_cl(this, cl);
  ZRootsIterator roots;

  // Follow roots. Note that we also visit the JVMTI weak tag map
  // as if they were strong roots to make sure we visit all tagged
  // objects, even those that might now have become unreachable.
  // If we didn't do this, the user would have expected to see
  // ObjectFree events for unreachable objects in the tag map.
  roots.oops_do(&root_cl, true /* visit_jvmti_weak_export */);
}