1 /*
   2  * Copyright (c) 1997, 2010, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_MEMORY_ITERATOR_HPP
  26 #define SHARE_VM_MEMORY_ITERATOR_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "memory/memRegion.hpp"
  30 #include "runtime/prefetch.hpp"
  31 #include "utilities/top.hpp"
  32 
  33 // The following classes are C++ `closures` for iterating over objects, roots and spaces
  34 
  35 class CodeBlob;
  36 class nmethod;
  37 class ReferenceProcessor;
  38 class DataLayout;
  39 
  40 // Closure provides abortability.
  41 
class Closure : public StackObj {
 protected:
  bool _abort;                            // set when a subtype requests early termination
  void set_abort() { _abort = true; }     // for subtypes only; there is no public setter
 public:
  Closure() : _abort(false) {}
  // A subtype can use this mechanism to indicate to some iterator mapping
  // functions that the iteration should cease.
  bool abort() { return _abort; }         // polled by cooperating iterators
  void clear_abort() { _abort = false; }  // re-arm the closure for another iteration
};
  53 
  54 // OopClosure is used for iterating through roots (oop*)
  55 
class OopClosure : public Closure {
 public:
  // Optional reference processor consulted by some subclasses; may be NULL.
  ReferenceProcessor* _ref_processor;
  OopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  OopClosure() : _ref_processor(NULL) { }
  // Apply the closure to the (possibly compressed) oop location "o".
  // The do_oop_v variants simply forward to do_oop; they give callers a
  // guaranteed-virtual entry point.
  virtual void do_oop(oop* o) = 0;
  virtual void do_oop_v(oop* o) { do_oop(o); }
  virtual void do_oop(narrowOop* o) = 0;
  virtual void do_oop_v(narrowOop* o) { do_oop(o); }

  // In support of post-processing of weak links of KlassKlass objects;
  // see KlassKlass::oop_oop_iterate().
  // NOTE(review): the top-level "const" on these bool return types is
  // meaningless to callers; it is kept only because existing overriders
  // elsewhere may repeat it in their signatures.

  virtual const bool should_remember_klasses() const {
    // Default (non-remembering) implementation; the assert fires if a
    // phase that requires klass remembering is using a closure that has
    // not overridden this method (see RememberKlassesChecker below).
    assert(!must_remember_klasses(), "Should have overriden this method.");
    return false;
  }

  virtual void remember_klass(Klass* k) { /* do nothing */ }

  // In support of post-processing of weak references in
  // ProfileData (MethodDataOop) objects; see, for example,
  // VirtualCallData::oop_iterate().
  virtual const bool should_remember_mdo() const { return false; }
  virtual void remember_mdo(DataLayout* v) { /* do nothing */ }

  // The methods below control how object iterations invoking this closure
  // should be performed:

  // If "true", invoke on header klass field.
  bool do_header() { return true; } // Note that this is non-virtual.
  // Controls how prefetching is done for invocations of this closure.
  Prefetch::style prefetch_style() { // Note that this is non-virtual.
    return Prefetch::do_none;
  }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }

#ifdef ASSERT
  // Debug-only flag used with RememberKlassesChecker (see below) to assert
  // that klass-remembering closures are in use during phases that need them.
  static bool _must_remember_klasses;
  static bool must_remember_klasses();
  static void set_must_remember_klasses(bool v);
#endif
};
 103 
 104 // ObjectClosure is used for iterating through an object space
 105 
class ObjectClosure : public Closure {
 public:
  // Called once for each object encountered during the iteration.
  virtual void do_object(oop obj) = 0;
};
 111 
 112 
// An ObjectClosure that can also answer a yes/no question about an object
// (e.g. a liveness predicate) via do_object_b.
class BoolObjectClosure : public ObjectClosure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};
 117 
 118 // Applies an oop closure to all ref fields in objects iterated over in an
 119 // object iteration.
class ObjectToOopClosure: public ObjectClosure {
  OopClosure* _cl;   // the oop closure applied to each object's ref fields
public:
  // Defined out of line; applies _cl to the reference fields of obj.
  void do_object(oop obj);
  ObjectToOopClosure(OopClosure* cl) : _cl(cl) {}
};
 126 
 127 // A version of ObjectClosure with "memory" (see _previous_address below)
class UpwardsObjectClosure: public BoolObjectClosure {
  HeapWord* _previous_address;   // address remembered from the previous step
 public:
  UpwardsObjectClosure() : _previous_address(NULL) { }
  void set_previous(HeapWord* addr) { _previous_address = addr; }
  HeapWord* previous()              { return _previous_address; }
  // A return value of "true" can be used by the caller to decide
  // if this object's end should *NOT* be recorded in
  // _previous_address above.
  virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
};
 139 
 140 // A version of ObjectClosure that is expected to be robust
 141 // in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  // Both methods return a size (in HeapWords, presumably — TODO confirm
  // against implementers); the _m variant restricts attention to region mr.
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};
 147 
 148 // The following are used in CompactibleFreeListSpace and
 149 // ConcurrentMarkSweepGeneration.
 150 
 151 // Blk closure (abstract class)
// Blk closure (abstract class): applied to a block starting at addr;
// returns the size of the block processed.
class BlkClosure : public StackObj {
 public:
  virtual size_t do_blk(HeapWord* addr) = 0;
};
 156 
 157 // A version of BlkClosure that is expected to be robust
 158 // in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  // Deliberately trips a guarantee: careful iterations must go through
  // do_blk_careful, never the plain do_blk entry point.
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};
 167 
 168 // SpaceClosure is used for iterating over spaces
 169 
 170 class Space;
 171 class CompactibleSpace;
 172 
class SpaceClosure : public StackObj {
 public:
  // Called for each space in the iteration.
  virtual void do_space(Space* s) = 0;
};
 178 
// Like SpaceClosure, but for CompactibleSpace; note it derives from
// StackObj directly, not from SpaceClosure.
class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space.
  virtual void do_space(CompactibleSpace* s) = 0;
};
 184 
 185 
 186 // CodeBlobClosure is used for iterating through code blobs
 187 // in the code cache or on thread stacks
 188 
class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};
 194 
 195 
// A CodeBlobClosure that visits each nmethod at most once per marking
// cycle, using the nmethod's oops_do mark bit to deduplicate.
class MarkingCodeBlobClosure : public CodeBlobClosure {
 public:
  // Called for each code blob, but at most once per unique blob.
  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;

  // Out-of-line; the commented pseudo-code below sketches the behavior.
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }

  // RAII scope that brackets a marking cycle: the prologue/epilogue
  // (sketched below) reset the per-nmethod mark bits.
  class MarkScope : public StackObj {
  protected:
    bool _active;   // when false, the scope is a no-op
  public:
    MarkScope(bool activate = true);
      // = { if (active) nmethod::oops_do_marking_prologue(); }
    ~MarkScope();
      // = { if (active) nmethod::oops_do_marking_epilogue(); }
  };
};
 214 
 215 
 216 // Applies an oop closure to all ref fields in code blobs
 217 // iterated over in an object iteration.
class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
  OopClosure* _cl;     // applied to each oop embedded in a code blob
  bool _do_marking;    // if true, dedupe via the marking superclass
public:
  // Out-of-line; the commented pseudo-code below sketches the behavior.
  virtual void do_newly_marked_nmethod(nmethod* cb);
    // = { cb->oops_do(_cl); }
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
    : _cl(cl), _do_marking(do_marking) {}
};
 229 
 230 
 231 
 232 // MonitorClosure is used for iterating over monitors in the monitors cache
 233 
 234 class ObjectMonitor;
 235 
class MonitorClosure : public StackObj {
 public:
  // Called for each monitor in the monitor cache.
  virtual void do_monitor(ObjectMonitor* m) = 0;
};
 241 
 242 // A closure that is applied without any arguments.
// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  // Note: NOT pure virtual; a default definition exists out of line.
  virtual void do_void();
};
 249 
 250 
 251 // YieldClosure is intended for use by iteration loops
 252 // to incrementalize their work, allowing interleaving
 253 // of an interruptable task so as to allow other
 254 // threads to run (which may not otherwise be able to access
 255 // exclusive resources, for instance). Additionally, the
 256 // closure also allows for aborting an ongoing iteration
 257 // by means of checking the return value from the polling
 258 // call.
class YieldClosure : public StackObj {
  public:
   // Polled by the iteration loop; "true" asks the loop to return/yield.
   virtual bool should_return() = 0;
};
 263 
 264 // Abstract closure for serializing data (read or write).
 265 
// Abstract closure for serializing data: the same call sequence can be
// driven in "write" mode (dump) or "read" mode (restore/verify).
class SerializeOopClosure : public OopClosure {
public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the int pointed to by i.
  virtual void do_int(int* i) = 0;

  // Read/write the size_t pointed to by i.
  virtual void do_size_t(size_t* i) = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the HeapWord pointer pointed to by p.
  virtual void do_ptr(HeapWord** p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag.  If reading, then compare the tag against
  // the passed in value and fail if they don't match.  This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
};
 292 
 293 #ifdef ASSERT
// This class is used to flag phases of a collection that
// can unload classes and which should override the
// should_remember_klasses() and remember_klass() of OopClosure.
// The _must_remember_klasses is set in the constructor and restored
// in the destructor.  _must_remember_klasses is checked in assertions
// in the OopClosure implementations of should_remember_klasses() and
// remember_klass() and the expectation is that the OopClosure
// implementation should not be in use if _must_remember_klasses is set.
// Instances of RememberKlassesChecker can be placed in
// marking phases of collections which can do class unloading.
// RememberKlassesChecker can be passed "false" to turn off checking.
// It is used by CMS when CMS yields to a different collector.
 306 class RememberKlassesChecker: StackObj {
 307  bool _saved_state;
 308  bool _do_check;
 309  public:
 310   RememberKlassesChecker(bool checking_on) : _saved_state(false),
 311     _do_check(true) {
 312     // The ClassUnloading unloading flag affects the collectors except
 313     // for CMS.
 314     // CMS unloads classes if CMSClassUnloadingEnabled is true or
 315     // if ExplicitGCInvokesConcurrentAndUnloadsClasses is true and
 316     // the current collection is an explicit collection.  Turning
 317     // on the checking in general for
 318     // ExplicitGCInvokesConcurrentAndUnloadsClasses and
 319     // UseConcMarkSweepGC should not lead to false positives.
 320     _do_check =
 321       ClassUnloading && !UseConcMarkSweepGC ||
 322       CMSClassUnloadingEnabled && UseConcMarkSweepGC ||
 323       ExplicitGCInvokesConcurrentAndUnloadsClasses && UseConcMarkSweepGC;
 324     if (_do_check) {
 325       _saved_state = OopClosure::must_remember_klasses();
 326       OopClosure::set_must_remember_klasses(checking_on);
 327     }
 328   }
 329   ~RememberKlassesChecker() {
 330     if (_do_check) {
 331       OopClosure::set_must_remember_klasses(_saved_state);
 332     }
 333   }
 334 };
 335 #endif  // ASSERT
 336 
 337 #endif // SHARE_VM_MEMORY_ITERATOR_HPP