1 /*
   2  * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_MEMORY_ITERATOR_HPP
  26 #define SHARE_VM_MEMORY_ITERATOR_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "memory/memRegion.hpp"
  30 #include "runtime/prefetch.hpp"
  31 #include "utilities/top.hpp"
  32 
  33 // The following classes are C++ `closures` for iterating over objects, roots and spaces
  34 
  35 class CodeBlob;
  36 class nmethod;
  37 class ReferenceProcessor;
  38 class DataLayout;
  39 class KlassClosure;
  40 class ClassLoaderData;
  41 
// Closure provides abortability.

class Closure : public StackObj {
 protected:
  bool _abort;    // Sticky abort request; set via set_abort(), reset via clear_abort().
  void set_abort() { _abort = true; }
 public:
  Closure() : _abort(false) {}
  // A subtype can use this mechanism to indicate to some iterator mapping
  // functions that the iteration should cease.
  bool abort() { return _abort; }
  void clear_abort() { _abort = false; }
};
  55 
// OopClosure is used for iterating through references to Java objects.

class OopClosure : public Closure {
 public:
  // Apply the closure to the oop stored at the given location
  // (one overload each for full-width and compressed oops).
  virtual void do_oop(oop* o) = 0;
  // The _v variants simply forward to the (virtual) do_oop.
  virtual void do_oop_v(oop* o) { do_oop(o); }
  virtual void do_oop(narrowOop* o) = 0;
  virtual void do_oop_v(narrowOop* o) { do_oop(o); }
};
  65 
// ExtendedOopClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 public:
  // NULL when constructed with the default constructor.
  ReferenceProcessor* _ref_processor;
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_class_loader_data on the class loader data in class loaders.
  //
  // The virtual (without suffix) and the non-virtual (with _nv suffix) need
  // to be updated together, or else the devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safeness, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata. Currently,
  // only CMS needs these.

  virtual bool do_metadata() { return do_metadata_nv(); }
  bool do_metadata_v()       { return do_metadata(); }  // forces a virtual call
  bool do_metadata_nv()      { return false; }          // default: skip metadata

  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
  void do_klass_v(Klass* k)         { do_klass(k); }
  // Per the comment above, closures that answer do_metadata() == true
  // are expected to shadow this; reaching it is a bug.
  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }

  // Same expectation as do_klass_nv: only invoked when do_metadata() is
  // true, and such closures must override it.
  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }

  // Controls how prefetching is done for invocations of this closure.
  Prefetch::style prefetch_style() { // Note that this is non-virtual.
    return Prefetch::do_none;
  }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  // NOTE(review): presumably controls whether the closure is applied to the
  // Reference.discovered field during reference processing -- confirm against users.
  virtual bool apply_to_weak_ref_discovered_field() { return false; }
};
 110 
 111 // Wrapper closure only used to implement oop_iterate_no_header().
 112 class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
 113   OopClosure* _wrapped_closure;
 114  public:
 115   NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
 116   // Warning: this calls the virtual version do_oop in the the wrapped closure.
 117   void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
 118   void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }
 119 
 120   void do_oop(oop* p)          { assert(false, "Only the _nv versions should be used");
 121                                  _wrapped_closure->do_oop(p); }
 122   void do_oop(narrowOop* p)    { assert(false, "Only the _nv versions should be used");
 123                                  _wrapped_closure->do_oop(p);}
 124 };
 125 
class KlassClosure : public Closure {
 public:
  // Called for each Klass.
  virtual void do_klass(Klass* k) = 0;
};
 130 
// Wraps an OopClosure for use where a KlassClosure is required;
// do_klass() is defined out of line.
class KlassToOopClosure : public KlassClosure {
  OopClosure* _oop_closure;
 public:
  KlassToOopClosure(OopClosure* oop_closure) : _oop_closure(oop_closure) {}
  virtual void do_klass(Klass* k);
};
 137 
// Closure over a ClassLoaderData; do_cld() is defined out of line.
class CLDToOopClosure {
  OopClosure* _oop_closure;
  KlassToOopClosure _default_klass_closure;  // Used only by the first constructor.
  KlassClosure* _klass_closure;
  // NOTE(review): presumably tells do_cld() whether to claim the CLD
  // before processing -- confirm against the out-of-line definition.
  bool _must_claim_cld;

 public:
  // Klasses are handled by a default KlassToOopClosure wrapping oop_closure.
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _default_klass_closure(oop_closure),
      _klass_closure(&_default_klass_closure),
      _must_claim_cld(must_claim_cld) {}
  // Klasses are handled by the caller-supplied klass_closure.
  CLDToOopClosure(OopClosure* oop_closure, KlassClosure* klass_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _default_klass_closure(NULL), // Ignored
      _klass_closure(klass_closure),
      _must_claim_cld(must_claim_cld) {}

  void do_cld(ClassLoaderData* cld);
};
 158 
// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};
 166 
 167 
class BoolObjectClosure : public ObjectClosure {
 public:
  // Boolean predicate applied to each object; the meaning of the
  // result is defined by the subtype/caller.
  virtual bool do_object_b(oop obj) = 0;
};
 172 
// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration. do_object() is defined out of line.
class ObjectToOopClosure: public ObjectClosure {
  ExtendedOopClosure* _cl;  // Closure applied to each ref field.
public:
  void do_object(oop obj);
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};
 181 
 182 // A version of ObjectClosure with "memory" (see _previous_address below)
 183 class UpwardsObjectClosure: public BoolObjectClosure {
 184   HeapWord* _previous_address;
 185  public:
 186   UpwardsObjectClosure() : _previous_address(NULL) { }
 187   void set_previous(HeapWord* addr) { _previous_address = addr; }
 188   HeapWord* previous()              { return _previous_address; }
 189   // A return value of "true" can be used by the caller to decide
 190   // if this object's end should *NOT* be recorded in
 191   // _previous_address above.
 192   virtual bool do_object_bm(oop obj, MemRegion mr) = 0;
 193 };
 194 
// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  // Both return a size_t; the interpretation of the result is
  // defined by the subtype/caller.
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};
 202 
 203 // The following are used in CompactibleFreeListSpace and
 204 // ConcurrentMarkSweepGeneration.
 205 
// Blk closure (abstract class)
class BlkClosure : public StackObj {
 public:
  // Called for the block at the given address; returns a size_t
  // (presumably the block size -- see the users of this closure).
  virtual size_t do_blk(HeapWord* addr) = 0;
};
 211 
// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  // Intentionally fails: subtypes of this class must be driven
  // through do_blk_careful() instead.
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};
 222 
 223 // SpaceClosure is used for iterating over spaces
 224 
 225 class Space;
 226 class CompactibleSpace;
 227 
class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};
 233 
class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};
 239 
 240 
// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks

class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};
 249 
 250 
class MarkingCodeBlobClosure : public CodeBlobClosure {
 public:
  // Called for each code blob, but at most once per unique blob.
  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;

  // Defined out of line; the commented-out pseudo-code below sketches it.
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }

  // RAII scope bracketing a marking pass; ctor/dtor are defined out of
  // line (see the pseudo-code comments below).
  class MarkScope : public StackObj {
  protected:
    bool _active;  // Whether the prologue ran (and so the epilogue should).
  public:
    MarkScope(bool activate = true);
      // = { if (active) nmethod::oops_do_marking_prologue(); }
    ~MarkScope();
      // = { if (active) nmethod::oops_do_marking_epilogue(); }
  };
};
 269 
 270 
// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration. Both virtuals are defined
// out of line; the pseudo-code comments sketch them.
class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
  OopClosure* _cl;        // Closure applied to the blob's oops.
  bool _do_marking;       // If true, route through the marking superclass.
public:
  virtual void do_newly_marked_nmethod(nmethod* cb);
    // = { cb->oops_do(_cl); }
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
    : _cl(cl), _do_marking(do_marking) {}
};
 284 
 285 
 286 
 287 // MonitorClosure is used for iterating over monitors in the monitors cache
 288 
 289 class ObjectMonitor;
 290 
class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};
 296 
// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  // NOTE: not pure -- subtypes are nevertheless expected to override it.
  virtual void do_void();
};
 304 
 305 
// YieldClosure is intended for use by iteration loops
// to incrementalize their work, allowing interleaving
// of an interruptable task so as to allow other
// threads to run (which may not otherwise be able to access
// exclusive resources, for instance). Additionally, the
// closure also allows for aborting an ongoing iteration
// by means of checking the return value from the polling
// call.
class YieldClosure : public StackObj {
  public:
   // Polled by the iteration; returning true asks the loop to stop/yield.
   virtual bool should_return() = 0;
};
 318 
// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag.  If reading, then compare the tag against
  // the passed in value and fail if they don't match.  This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
};
 338 
 339 class SymbolClosure : public StackObj {
 340  public:
 341   virtual void do_symbol(Symbol**) = 0;
 342 
 343   // Clear LSB in symbol address; it can be set by CPSlot.
 344   static Symbol* load_symbol(Symbol** p) {
 345     return (Symbol*)(intptr_t(*p) & ~1);
 346   }
 347 
 348   // Store symbol, adjusting new pointer if the original pointer was adjusted
 349   // (symbol references in constant pool slots have their LSB set to 1).
 350   static void store_symbol(Symbol** p, Symbol* sym) {
 351     *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
 352   }
 353 };
 354 
 355 #endif // SHARE_VM_MEMORY_ITERATOR_HPP