1 /*
   2  * Copyright (c) 1997, 2016, Oracle and/or its affiliates. All rights reserved.
   3  * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
   4  *
   5  * This code is free software; you can redistribute it and/or modify it
   6  * under the terms of the GNU General Public License version 2 only, as
   7  * published by the Free Software Foundation.
   8  *
   9  * This code is distributed in the hope that it will be useful, but WITHOUT
  10  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #ifndef SHARE_VM_MEMORY_ITERATOR_HPP
  26 #define SHARE_VM_MEMORY_ITERATOR_HPP
  27 
  28 #include "memory/allocation.hpp"
  29 #include "memory/memRegion.hpp"
  30 #include "oops/oopsHierarchy.hpp"
  31 
  32 class CodeBlob;
  33 class nmethod;
  34 class ReferenceProcessor;
  35 class DataLayout;
  36 class KlassClosure;
  37 class ClassLoaderData;
  38 class Symbol;
  39 
  40 // The following classes are C++ `closures` for iterating over objects, roots and spaces
  41 
// Root of the closure hierarchy. Closures are stack-allocated only (StackObj).
class Closure : public StackObj { };
  43 
  44 // OopClosure is used for iterating through references to Java objects.
class OopClosure : public Closure {
 public:
  // Apply the closure to an uncompressed oop field.
  virtual void do_oop(oop* o) = 0;
  // Apply the closure to a compressed oop field.
  virtual void do_oop(narrowOop* o) = 0;
};
  50 
  51 // ExtendedOopClosure adds extra code to be run during oop iterations.
  52 // This is needed by the GC and is extracted to a separate type to not
  53 // pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 private:
  // Optional reference processor used for reference discovery; may be NULL.
  ReferenceProcessor* _ref_processor;

 protected:
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : _ref_processor(NULL) { }
  // Protected non-virtual destructor: instances are never deleted through an
  // ExtendedOopClosure pointer.
  ~ExtendedOopClosure() { }

  void set_ref_processor_internal(ReferenceProcessor* rp) { _ref_processor = rp; }

 public:
  ReferenceProcessor* ref_processor() const { return _ref_processor; }

  // Iteration of InstanceRefKlasses differ depending on the closure,
  // the below enum describes the different alternatives.
  enum ReferenceIterationMode {
    DO_DISCOVERY, // Tries to discover references
    DO_FIELDS     // No discovery, just apply closure to all fields
  };

  // The current default iteration mode is to do discovery.
  virtual ReferenceIterationMode reference_iteration_mode() { return DO_DISCOVERY; }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_cld   on the class loader data in class loaders.
  //
  // The virtual (without suffix) and the non-virtual (with _nv suffix) need
  // to be updated together, or else the devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safeness, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata.
  // Currently, only CMS and G1 need these.

  bool do_metadata_nv()      { return false; }
  virtual bool do_metadata() { return do_metadata_nv(); }

  // Default implementations assert: closures that answer true from
  // do_metadata() must override both the virtual and the _nv versions.
  void do_klass_nv(Klass* k)      { ShouldNotReachHere(); }
  virtual void do_klass(Klass* k) { do_klass_nv(k); }

  void do_cld_nv(ClassLoaderData* cld)      { ShouldNotReachHere(); }
  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }

#ifdef ASSERT
  // Default verification of each visited oop field.
  template <typename T> void verify(T* p);

  // Can be used by subclasses to turn off the default verification of oop fields.
  virtual bool should_verify_oops() { return true; }
#endif
};
 115 
 116 // Wrapper closure only used to implement oop_iterate_no_header().
class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
  // The closure that receives every oop field; metadata is not walked
  // (do_metadata() is inherited and returns false).
  OopClosure* _wrapped_closure;
 public:
  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
  // Warning: this calls the virtual version do_oop in the wrapped closure.
  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }

  // The virtual versions still forward, but assert in debug builds:
  // iteration with this closure is expected to be devirtualized to the
  // _nv versions above.
  void do_oop(oop* p)          { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p); }
  void do_oop(narrowOop* p)    { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p);}
};
 130 
// Closure applied to each Klass in an iteration over klasses.
class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};
 135 
// Closure applied to each ClassLoaderData in an iteration over class loaders.
class CLDClosure : public Closure {
 public:
  virtual void do_cld(ClassLoaderData* cld) = 0;
};
 140 
// Adapts a KlassClosure iteration to an OopClosure: for each visited Klass,
// the wrapped oop closure is applied (implementation is out of line).
class KlassToOopClosure : public KlassClosure {
  friend class MetadataAwareOopClosure;
  friend class MetadataAwareOopsInGenClosure;

  OopClosure* _oop_closure;

  // Used when _oop_closure couldn't be set in an initialization list.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }

 public:
  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}

  virtual void do_klass(Klass* k);
};
 158 
// Adapts a CLD iteration to an OopClosure, walking klasses via an internal
// KlassToOopClosure wrapping the same oop closure.
class CLDToOopClosure : public CLDClosure {
  OopClosure*       _oop_closure;
  KlassToOopClosure _klass_closure;
  // Whether do_cld() should claim the CLD before processing it
  // (presumably to avoid repeated visits during parallel iteration —
  // behavior defined out of line).
  bool              _must_claim_cld;

 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _klass_closure(oop_closure),
      _must_claim_cld(must_claim_cld) {}

  void do_cld(ClassLoaderData* cld);
};
 172 
// Like CLDToOopClosure, but applies an independent KlassClosure to the
// klasses in addition to the oop closure (implementation is out of line).
class CLDToKlassAndOopClosure : public CLDClosure {
  friend class G1CollectedHeap;
 protected:
  OopClosure*   _oop_closure;
  KlassClosure* _klass_closure;
  bool          _must_claim_cld;
 public:
  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
                          OopClosure* oop_closure,
                          bool must_claim_cld) :
                              _oop_closure(oop_closure),
                              _klass_closure(klass_closure),
                              _must_claim_cld(must_claim_cld) {}
  void do_cld(ClassLoaderData* cld);
};
 188 
// The base class for all concurrent marking closures
// that participate in class unloading.
 191 // It's used to proxy through the metadata to the oops defined in them.
class MetadataAwareOopClosure: public ExtendedOopClosure {
  // Internal klass closure that feeds klass pointers back into this closure.
  KlassToOopClosure _klass_closure;

 public:
  MetadataAwareOopClosure() : ExtendedOopClosure() {
    // _klass_closure can't reference 'this' in the initializer list, so it
    // is wired up here (initialize() asserts it is only called once).
    _klass_closure.initialize(this);
  }
  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  // Metadata is always walked; the virtual and _nv versions must stay in
  // sync (see the devirtualization note on ExtendedOopClosure).
  bool do_metadata_nv()      { return true; }
  virtual bool do_metadata() { return do_metadata_nv(); }

  void do_klass_nv(Klass* k);
  virtual void do_klass(Klass* k) { do_klass_nv(k); }

  void do_cld_nv(ClassLoaderData* cld);
  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
};
 212 
 213 // ObjectClosure is used for iterating through an object space
 214 
class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};
 220 
 221 
// Predicate closure over objects.
class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};
 226 
// Trivial predicate that accepts every object.
class AlwaysTrueClosure: public BoolObjectClosure {
 public:
  bool do_object_b(oop p) { return true; }
};
 231 
// Trivial predicate that rejects every object.
class AlwaysFalseClosure : public BoolObjectClosure {
 public:
  bool do_object_b(oop p) { return false; }
};
 236 
 237 // Applies an oop closure to all ref fields in objects iterated over in an
 238 // object iteration.
 239 class ObjectToOopClosure: public ObjectClosure {
 240   ExtendedOopClosure* _cl;
 241 public:
 242   void do_object(oop obj);
 243   ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
 244 };
 245 
 246 // A version of ObjectClosure that is expected to be robust
 247 // in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  // Like do_object_careful(), but restricted to the given MemRegion.
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  // Returns a size_t — presumably the size of the processed object in
  // HeapWords, with 0 signalling failure; confirm against implementers.
  virtual size_t do_object_careful(oop p) = 0;
};
 253 
 254 // The following are used in CompactibleFreeListSpace and
 255 // ConcurrentMarkSweepGeneration.
 256 
 257 // Blk closure (abstract class)
// Blk closure (abstract class): applied to heap blocks at the given address.
class BlkClosure : public StackObj {
 public:
  virtual size_t do_blk(HeapWord* addr) = 0;
};
 262 
 263 // A version of BlkClosure that is expected to be robust
 264 // in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  // The plain entry point is disabled: callers must use do_blk_careful(),
  // which is robust against uninitialized objects.
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};
 273 
 274 // SpaceClosure is used for iterating over spaces
 275 
 276 class Space;
 277 class CompactibleSpace;
 278 
class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};
 284 
class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};
 290 
 291 
 292 // CodeBlobClosure is used for iterating through code blobs
 293 // in the code cache or on thread stacks
 294 
class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};
 300 
 301 // Applies an oop closure to all ref fields in code blobs
 302 // iterated over in an object iteration.
class CodeBlobToOopClosure : public CodeBlobClosure {
  // Closure applied to the oops embedded in each nmethod.
  OopClosure* _cl;
  // Whether oop relocations in the nmethod should be updated after the
  // closure is applied (see the constructor warning below).
  bool _fix_relocations;
 protected:
  // Applies _cl to the oops of nm (implementation is out of line).
  void do_nmethod(nmethod* nm);
 public:
  // If fix_relocations(), then cl must copy objects to their new location immediately to avoid
  // patching nmethods with the old locations.
  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
  virtual void do_code_blob(CodeBlob* cb);

  bool fix_relocations() const { return _fix_relocations; }
  // Named constant for readable constructor calls, e.g.
  // CodeBlobToOopClosure(cl, CodeBlobToOopClosure::FixRelocations).
  const static bool FixRelocations = true;
};
 317 
class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
 public:
  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
  // Called for each code blob, but at most once per unique blob.

  virtual void do_code_blob(CodeBlob* cb);
};
 325 
 326 // MonitorClosure is used for iterating over monitors in the monitors cache
 327 
 328 class ObjectMonitor;
 329 
class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};
 335 
 336 // A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  virtual void do_void();
};
 343 
 344 
 345 // YieldClosure is intended for use by iteration loops
 346 // to incrementalize their work, allowing interleaving
// of an interruptible task so as to allow other
 348 // threads to run (which may not otherwise be able to access
 349 // exclusive resources, for instance). Additionally, the
 350 // closure also allows for aborting an ongoing iteration
 351 // by means of checking the return value from the polling
 352 // call.
 353 class YieldClosure : public StackObj {
 354   public:
 355    virtual bool should_return() = 0;
 356 };
 357 
 358 // Abstract closure for serializing data (read or write).
 359 
 360 class SerializeClosure : public Closure {
 361 public:
 362   // Return bool indicating whether closure implements read or write.
 363   virtual bool reading() const = 0;
 364 
 365   // Read/write the void pointer pointed to by p.
 366   virtual void do_ptr(void** p) = 0;
 367 
 368   // Read/write the 32-bit unsigned integer pointed to by p.
 369   virtual void do_u4(u4* p) = 0;
 370 
 371   // Read/write the region specified.
 372   virtual void do_region(u_char* start, size_t size) = 0;
 373 
 374   // Check/write the tag.  If reading, then compare the tag against
 375   // the passed in value and fail is they don't match.  This allows
 376   // for verification that sections of the serialized data are of the
 377   // correct length.
 378   virtual void do_tag(int tag) = 0;
 379 
 380   bool writing() {
 381     return !reading();
 382   }
 383 };
 384 
// Closure applied to Symbol* slots, with helpers that hide the tag bit
// that constant pool slots may set in the low bit of the pointer.
class SymbolClosure : public StackObj {
 public:
  virtual void do_symbol(Symbol**) = 0;

  // Clear LSB in symbol address; it can be set by CPSlot.
  static Symbol* load_symbol(Symbol** p) {
    return (Symbol*)(intptr_t(*p) & ~1);
  }

  // Store symbol, adjusting new pointer if the original pointer was adjusted
  // (symbol references in constant pool slots have their LSB set to 1).
  static void store_symbol(Symbol** p, Symbol* sym) {
    *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
  }
};
 400 
 401 // The two class template specializations are used to dispatch calls
 402 // to the ExtendedOopClosure functions. If use_non_virtual_call is true,
 403 // the non-virtual versions are called (E.g. do_oop_nv), otherwise the
 404 // virtual versions are called (E.g. do_oop).
 405 
// Primary template is intentionally empty: only the two explicit
// specializations below are usable.
template <bool use_non_virtual_call>
class Devirtualizer {};
 408 
 409 // Dispatches to the non-virtual functions.
// Dispatches to the non-virtual functions (do_oop_nv, do_klass_nv, ...),
// allowing closure calls to be inlined when the static type is known.
template <> class Devirtualizer<true> {
 public:
  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
  template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
  template <class OopClosureType>             static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
  template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
};
 417 
 418 // Dispatches to the virtual functions.
// Dispatches to the virtual functions (do_oop, do_klass, ...), paying a
// virtual call per invocation.
template <> class Devirtualizer<false> {
 public:
  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
  template <class OopClosureType>             static void do_klass(OopClosureType* closure, Klass* k);
  template <class OopClosureType>             static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
  template <class OopClosureType>             static bool do_metadata(OopClosureType* closure);
};
 426 
 427 #endif // SHARE_VM_MEMORY_ITERATOR_HPP