1 /*
   2  * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_MEMORY_ITERATOR_HPP
  26 #define SHARE_VM_MEMORY_ITERATOR_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "memory/memRegion.hpp"
  30 #include "utilities/top.hpp"
  31 
  32 class CodeBlob;
  33 class nmethod;
  34 class ReferenceProcessor;
  35 class DataLayout;
  36 class KlassClosure;
  37 class ClassLoaderData;
  38 
// The following classes are C++ `closures` for iterating over objects, roots and spaces

// Abstract root of the closure hierarchy. Derives from StackObj
// (allocation.hpp), so closures are stack-allocated by convention and
// passed around by pointer or reference.
class Closure : public StackObj { };
  42 
// OopClosure is used for iterating through references to Java objects.
// Implementors must handle both uncompressed (oop*) and compressed
// (narrowOop*) reference locations.
class OopClosure : public Closure {
 public:
  virtual void do_oop(oop* o) = 0;
  // Explicitly-virtual entry point; simply forwards to do_oop.
  virtual void do_oop_v(oop* o) { do_oop(o); }
  virtual void do_oop(narrowOop* o) = 0;
  virtual void do_oop_v(narrowOop* o) { do_oop(o); }

  // Use SFINAE to dispatch to the "most appropriate" do_oop using OopClosureDispatcher.
  // Read the specialized_oop_closures.hpp file how this works
  template<class OopClosureType, class OopType>
  void do_oop_auto(OopType* o);
};
  56 
// ExtendedOopClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 public:
  // Set by closures that participate in reference discovery; may be NULL.
  ReferenceProcessor* _ref_processor;
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_class_loader_data on the class loader data in class loaders.
  //
  // The virtual (without suffix) and the non-virtual (with _nv suffix) need
  // to be updated together, or else the devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safeness, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata.
  // Currently, only CMS and G1 need these.

  virtual bool do_metadata() { return do_metadata_nv(); }
  bool do_metadata_v()       { return do_metadata(); }
  bool do_metadata_nv()      { return false; }
  // Use SFINAE to dispatch to the "most appropriate" do_metadata using OopClosureDispatcher.
  // Read the specialized_oop_closures.hpp file how this works
  template<class OopClosureType>
  bool do_metadata_auto();

  // The default do_klass_nv aborts: any closure whose do_metadata()
  // answers true must provide its own do_klass/do_klass_nv pair.
  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
  void do_klass_v(Klass* k)         { do_klass(k); }
  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }
  // Use SFINAE to dispatch to the "most appropriate" do_klass using OopClosureDispatcher.
  // Read the specialized_oop_closures.hpp file how this works
  template<class OopClosureType>
  void do_klass_auto(Klass* klass);

  // Likewise: metadata-walking closures must override this.
  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }
  // Use SFINAE to dispatch to the "most appropriate" do_class_loader_data using OopClosureDispatcher.
  // Read the specialized_oop_closures.hpp file how this works
  template<class OopClosureType>
  void do_class_loader_data_auto(ClassLoaderData* cld);

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }
};
 108 
 109 // Autospecialization uses an OopClosure rather than ExtendedOopClosure
 110 // for oop_iterate_no_header to make sure metadata methods are not called
 111 // in the first place using SFINAE type checks
 112 template<class OopClosureType>
 113 class NoHeaderOopClosure : public OopClosure {
 114   OopClosureType* _cl;
 115  public:
 116   NoHeaderOopClosure(OopClosureType *cl) : _cl(cl) {}
 117   void do_oop(oop *p)       { _cl->template do_oop_auto<OopClosureType, oop>(p); }
 118   void do_oop(narrowOop *p) { _cl->template do_oop_auto<OopClosureType, narrowOop>(p); }
 119 };
 120 
// Wrapper closure only used to implement oop_iterate_no_header().
// Presents a plain OopClosure as an ExtendedOopClosure; the inherited
// do_metadata_nv() answers false, so no metadata is walked.
class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
  OopClosure* _wrapped_closure;
 public:
  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
  // Warning: this calls the virtual version do_oop in the wrapped closure.
  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }

  // The virtual variants still forward after asserting, so release
  // builds keep working even if a caller bypasses the _nv path.
  void do_oop(oop* p)          { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p); }
  void do_oop(narrowOop* p)    { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p);}
};
 135 
// Closure applied to each Klass during metadata iteration.
class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};
 140 
// Closure applied to each ClassLoaderData during class-loader-data iteration.
class CLDClosure : public Closure {
 public:
  virtual void do_cld(ClassLoaderData* cld) = 0;
};
 145 
// Adapter that turns Klass visits into calls on a wrapped OopClosure;
// do_klass is implemented out of line.
class KlassToOopClosure : public KlassClosure {
  friend class MetadataAwareOopClosure;
  friend class MetadataAwareOopsInGenClosure;

  // The wrapped closure; may start out NULL and be set later via initialize().
  OopClosure* _oop_closure;

  // Used when _oop_closure couldn't be set in an initialization list.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }

 public:
  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}

  virtual void do_klass(Klass* k);
};
 163 
// Adapter that visits a ClassLoaderData with an OopClosure;
// do_cld is implemented out of line.
class CLDToOopClosure : public CLDClosure {
  OopClosure*       _oop_closure;
  // Wraps the same oop closure for the klasses of the CLD.
  KlassToOopClosure _klass_closure;
  // Whether the CLD must be claimed before processing.
  // NOTE(review): presumably claiming prevents duplicate visits by
  // parallel workers -- confirm in the do_cld implementation.
  bool              _must_claim_cld;

 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _klass_closure(oop_closure),
      _must_claim_cld(must_claim_cld) {}

  void do_cld(ClassLoaderData* cld);
};
 177 
// Like CLDToOopClosure, but applies a caller-supplied KlassClosure to
// the klasses instead of deriving one from the oop closure;
// do_cld is implemented out of line.
class CLDToKlassAndOopClosure : public CLDClosure {
  friend class SharedHeap;
  friend class G1CollectedHeap;
 protected:
  OopClosure*   _oop_closure;
  KlassClosure* _klass_closure;
  // Whether the CLD must be claimed before processing (see CLDToOopClosure).
  bool          _must_claim_cld;
 public:
  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
                          OopClosure* oop_closure,
                          bool must_claim_cld) :
                              _oop_closure(oop_closure),
                              _klass_closure(klass_closure),
                              _must_claim_cld(must_claim_cld) {}
  void do_cld(ClassLoaderData* cld);
};
 194 
// The base class for all concurrent marking closures,
// that participates in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataAwareOopClosure: public ExtendedOopClosure {
  // Feeds the oops of each visited Klass back into this very closure.
  KlassToOopClosure _klass_closure;

 public:
  MetadataAwareOopClosure() : ExtendedOopClosure() {
    // Cannot be done in the initializer list: the wrapped closure is `this`.
    _klass_closure.initialize(this);
  }
  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  // Always walks metadata. The virtual and devirtualized (_nv) variants
  // must stay in agreement (see ExtendedOopClosure).
  virtual bool do_metadata()    { return do_metadata_nv(); }
  inline  bool do_metadata_nv() { return true; }

  virtual void do_klass(Klass* k);
  void do_klass_nv(Klass* k);

  virtual void do_class_loader_data(ClassLoaderData* cld);
};
 217 
// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};
 225 
 226 
// Predicate closure over objects, e.g. for liveness queries.
class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};
 231 
// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration.
class ObjectToOopClosure: public ObjectClosure {
  ExtendedOopClosure* _cl;
public:
  // Iterates the reference fields of obj with _cl (implemented out of line).
  void do_object(oop obj);
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};
 240 
// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  // Like do_object, but restricted to the part of p within MemRegion mr.
  // NOTE(review): the size_t return appears to be the object size in
  // heap words -- confirm against concrete implementations.
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};
 248 
// The following are used in CompactibleFreeListSpace and
// ConcurrentMarkSweepGeneration.

// Blk closure (abstract class)
// Visits a "block" at a heap address. NOTE(review): the size_t return
// appears to be the block size, letting callers step through a space
// block-by-block -- confirm against implementations.
class BlkClosure : public StackObj {
 public:
  virtual size_t do_blk(HeapWord* addr) = 0;
};
 257 
// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  // The unchecked entry point is deliberately disabled for careful
  // closures; callers must go through do_blk_careful instead.
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};
 268 
// SpaceClosure is used for iterating over spaces

class Space;
class CompactibleSpace;

class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};
 279 
// Like SpaceClosure, but typed for compactible spaces.
class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};
 285 
 286 
// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks

class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};
 295 
// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure : public CodeBlobClosure {
  OopClosure* _cl;
  // If true, relocation info is fixed up while the oops are visited
  // (needed when oops may have moved); see do_nmethod's implementation.
  bool _fix_relocations;
 protected:
  // Helper for subclasses: applies _cl to the oops of one nmethod
  // (implemented out of line).
  void do_nmethod(nmethod* nm);
 public:
  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
  virtual void do_code_blob(CodeBlob* cb);

  // Named constant so call sites read as
  // CodeBlobToOopClosure(cl, CodeBlobToOopClosure::FixRelocations).
  const static bool FixRelocations = true;
};
 309 
// CodeBlobToOopClosure variant that marks blobs so each unique blob is
// processed at most once; requires an active MarkScope around iteration.
class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
 public:
  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
  // Called for each code blob, but at most once per unique blob.

  virtual void do_code_blob(CodeBlob* cb);

  // RAII scope bracketing a marking iteration: the constructor/destructor
  // run the nmethod marking prologue/epilogue (see the commented bodies).
  class MarkScope : public StackObj {
  protected:
    bool _active;
  public:
    MarkScope(bool activate = true);
      // = { if (active) nmethod::oops_do_marking_prologue(); }
    ~MarkScope();
      // = { if (active) nmethod::oops_do_marking_epilogue(); }
  };
};
 327 
// MonitorClosure is used for iterating over monitors in the monitors cache

class ObjectMonitor;

class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};
 337 
// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  // Note: not pure, so a default definition exists out of line.
  virtual void do_void();
};
 345 
 346 
// YieldClosure is intended for use by iteration loops
// to incrementalize their work, allowing interleaving
// of an interruptable task so as to allow other
// threads to run (which may not otherwise be able to access
// exclusive resources, for instance). Additionally, the
// closure also allows for aborting an ongoing iteration
// by means of checking the return value from the polling
// call.
class YieldClosure : public StackObj {
  public:
   // Polled by the iteration loop; true requests that the loop stop/yield.
   virtual bool should_return() = 0;
};
 359 
// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag.  If reading, then compare the tag against
  // the passed in value and fail if they don't match.  This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
};
 379 
 380 class SymbolClosure : public StackObj {
 381  public:
 382   virtual void do_symbol(Symbol**) = 0;
 383 
 384   // Clear LSB in symbol address; it can be set by CPSlot.
 385   static Symbol* load_symbol(Symbol** p) {
 386     return (Symbol*)(intptr_t(*p) & ~1);
 387   }
 388 
 389   // Store symbol, adjusting new pointer if the original pointer was adjusted
 390   // (symbol references in constant pool slots have their LSB set to 1).
 391   static void store_symbol(Symbol** p, Symbol* sym) {
 392     *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
 393   }
 394 };
 395 
 396 
// Helper defines for ExtendedOopClosure

// Guards a statement so it only runs when the closure walks metadata.
// First asserts that the devirtualized (_nv-suffixed) answer agrees with
// the virtual one, since the two must be kept in sync by hand.
#define if_do_metadata_checked(closure, nv_suffix)       \
  /* Make sure the non-virtual and the virtual versions match. */     \
  assert(closure->do_metadata##nv_suffix() == closure->do_metadata(), \
      "Inconsistency in do_metadata");                                \
  if (closure->do_metadata##nv_suffix())

// Asserts that the closure does NOT walk metadata, for call sites that
// have no metadata-handling code.
#define assert_should_ignore_metadata(closure, nv_suffix)                                  \
  assert(!closure->do_metadata##nv_suffix(), "Code to handle metadata is not implemented")
 407 
 408 #endif // SHARE_VM_MEMORY_ITERATOR_HPP