1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_MEMORY_ITERATOR_HPP
  26 #define SHARE_VM_MEMORY_ITERATOR_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "memory/memRegion.hpp"
  30 #include "utilities/top.hpp"
  31 
  32 class CodeBlob;
  33 class nmethod;
  34 class ReferenceProcessor;
  35 class DataLayout;
  36 class KlassClosure;
  37 class ClassLoaderData;
  38 
// The following classes are C++ `closures` for iterating over objects, roots and spaces

// Abstract, stack-allocated root of the closure hierarchy; carries no state.
class Closure : public StackObj { };
  42 
// OopClosure is used for iterating through references to Java objects.
class OopClosure : public Closure {
 public:
  // Apply the closure to an uncompressed oop slot.
  virtual void do_oop(oop* o) = 0;
  // Explicitly-virtual entry point; simply forwards to do_oop.
  virtual void do_oop_v(oop* o) { do_oop(o); }
  // Apply the closure to a compressed (narrow) oop slot.
  virtual void do_oop(narrowOop* o) = 0;
  virtual void do_oop_v(narrowOop* o) { do_oop(o); }
};
  51 
// ExtendedOopClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 public:
  // Optional ReferenceProcessor consulted during iteration; may be NULL
  // (public so iteration code can read it directly).
  ReferenceProcessor* _ref_processor;
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_class_loader_data on the class loader data in class loaders.
  //
  // The virtual (without suffix) and the non-virtual (with _nv suffix) need
  // to be updated together, or else the devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safeness, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata. Currently,
  // only CMS needs these.

  virtual bool do_metadata() { return do_metadata_nv(); }
  bool do_metadata_v()       { return do_metadata(); }
  bool do_metadata_nv()      { return false; }

  // Subclasses whose do_metadata answers true must override do_klass /
  // do_class_loader_data; the defaults below trap if ever reached.
  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
  void do_klass_v(Klass* k)         { do_klass(k); }
  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }

  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }
};
  91 
// Wrapper closure only used to implement oop_iterate_no_header().
// Adapts a plain OopClosure to the ExtendedOopClosure interface; it does
// not override do_metadata, so no metadata is walked through it.
class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
  OopClosure* _wrapped_closure;
 public:
  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
  // Warning: this calls the virtual version do_oop in the wrapped closure.
  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }

  // The virtual versions still forward, but assert in debug builds:
  // callers are expected to reach this closure only via the _nv entries.
  void do_oop(oop* p)          { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p); }
  void do_oop(narrowOop* p)    { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p);}
};
 106 
// Closure applied to each Klass during klass iteration.
class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};
 111 
// Closure applied to each ClassLoaderData during CLD iteration.
class CLDClosure : public Closure {
 public:
  virtual void do_cld(ClassLoaderData* cld) = 0;
};
 116 
// Adapts an OopClosure so it can be used where a KlassClosure is expected.
class KlassToOopClosure : public KlassClosure {
  friend class MetadataAwareOopClosure;
  friend class MetadataAwareOopsInGenClosure;

  OopClosure* _oop_closure;

  // Used when _oop_closure couldn't be set in an initialization list.
  // Asserts it is called at most once, on an instance built with NULL.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }

public:
  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}
  // Defined out of line.
  virtual void do_klass(Klass* k);
};
 133 
// Adapts an OopClosure so it can be used where a CLDClosure is expected.
class CLDToOopClosure : public CLDClosure {
  OopClosure* _oop_closure;
  KlassToOopClosure _klass_closure;  // wraps the same oop closure for klasses
  bool _must_claim_cld;              // whether do_cld should claim the CLD first
 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _klass_closure(oop_closure),
      _must_claim_cld(must_claim_cld) {}

  // Defined out of line.
  void do_cld(ClassLoaderData* cld);
};
 147 
// The base class for all concurrent marking closures,
// that participates in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataAwareOopClosure: public ExtendedOopClosure {
  // Routes klass-embedded oops back into this closure (wired up below).
  KlassToOopClosure _klass_closure;

 public:
  MetadataAwareOopClosure() : ExtendedOopClosure() {
    // _klass_closure points back to `this`, so it cannot be set in the
    // initialization list; use the one-shot initialize() instead.
    _klass_closure.initialize(this);
  }
  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  // Always walks metadata. The virtual and _nv versions must stay in
  // sync (see ExtendedOopClosure's devirtualization contract).
  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  // Defined out of line.
  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};
 170 
// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};
 178 
 179 
// Predicate closure over objects; returns a boolean per object.
class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};
 184 
// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration.
class ObjectToOopClosure: public ObjectClosure {
  ExtendedOopClosure* _cl;  // applied to each object's reference fields
public:
  // Defined out of line.
  void do_object(oop obj);
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};
 193 
// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  // Both return a size_t (e.g. a size in HeapWords); the _m variant is
  // additionally bounded by the given MemRegion.
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};
 201 
// The following are used in CompactibleFreeListSpace and
// ConcurrentMarkSweepGeneration.

// Blk closure (abstract class)
class BlkClosure : public StackObj {
 public:
  // Called for each block starting at addr; returns a size_t result.
  virtual size_t do_blk(HeapWord* addr) = 0;
};
 210 
// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  // The plain do_blk entry point is disabled for careful closures:
  // it always fails with a guarantee; use do_blk_careful instead.
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};
 221 
// SpaceClosure is used for iterating over spaces

class Space;
class CompactibleSpace;

class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};
 232 
class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};
 238 
 239 
// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks

class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};
 248 
 249 
// A CodeBlobClosure that marks nmethods so each unique blob is
// processed at most once per marking cycle.
class MarkingCodeBlobClosure : public CodeBlobClosure {
 public:
  // Called for each code blob, but at most once per unique blob.
  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;

  // Defined out of line; the intended behavior is sketched below.
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }

  // RAII scope bracketing a marking phase; the prologue/epilogue calls
  // (sketched below) run only when the scope is constructed active.
  class MarkScope : public StackObj {
  protected:
    bool _active;
  public:
    MarkScope(bool activate = true);
      // = { if (active) nmethod::oops_do_marking_prologue(); }
    ~MarkScope();
      // = { if (active) nmethod::oops_do_marking_epilogue(); }
  };
};
 268 
 269 
// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
  OopClosure* _cl;      // applied to the oops of each visited blob
  bool _do_marking;     // if true, route through the marking superclass
public:
  // Defined out of line; intended behavior sketched below.
  virtual void do_newly_marked_nmethod(nmethod* cb);
    // = { cb->oops_do(_cl); }
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
    : _cl(cl), _do_marking(do_marking) {}
};
 283 
 284 
 285 
// MonitorClosure is used for iterating over monitors in the monitors cache

class ObjectMonitor;

class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};
 295 
// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  virtual void do_void();
};
 303 
 304 
// YieldClosure is intended for use by iteration loops
// to incrementalize their work, allowing interleaving
// of an interruptable task so as to allow other
// threads to run (which may not otherwise be able to access
// exclusive resources, for instance). Additionally, the
// closure also allows for aborting an ongoing iteration
// by means of checking the return value from the polling
// call.
class YieldClosure : public StackObj {
  public:
   // Polled by the iteration loop; true requests the loop to return.
   virtual bool should_return() = 0;
};
 317 
// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag.  If reading, then compare the tag against
  // the passed in value and fail if they don't match.  This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
};
 337 
 338 class SymbolClosure : public StackObj {
 339  public:
 340   virtual void do_symbol(Symbol**) = 0;
 341 
 342   // Clear LSB in symbol address; it can be set by CPSlot.
 343   static Symbol* load_symbol(Symbol** p) {
 344     return (Symbol*)(intptr_t(*p) & ~1);
 345   }
 346 
 347   // Store symbol, adjusting new pointer if the original pointer was adjusted
 348   // (symbol references in constant pool slots have their LSB set to 1).
 349   static void store_symbol(Symbol** p, Symbol* sym) {
 350     *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
 351   }
 352 };
 353 
 354 
// Helper defines for ExtendedOopClosure

// Verifies that the devirtualized (nv_suffix) and virtual do_metadata
// answers agree, then guards the following statement so it runs only
// when the closure wants metadata processed.
#define if_do_metadata_checked(closure, nv_suffix)       \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
      "Inconsistency in do_metadata");                                \
  if (closure->do_metadata##nv_suffix())

// Asserts that the closure does NOT request metadata processing.
#define assert_should_ignore_metadata(closure, nv_suffix)                                  \
  assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented")
 365 
 366 #endif // SHARE_VM_MEMORY_ITERATOR_HPP