/*
 * Copyright (c) 1997, 2013, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ITERATOR_HPP
#define SHARE_VM_MEMORY_ITERATOR_HPP

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "runtime/prefetch.hpp"
#include "utilities/top.hpp"

class CodeBlob;
class nmethod;
class ReferenceProcessor;
class DataLayout;
class KlassClosure;
class ClassLoaderData;

// The following classes are C++ `closures` for iterating over objects, roots and spaces

class Closure : public StackObj { };

// OopClosure is used for iterating through references to Java objects.
class OopClosure : public Closure {
 public:
  virtual void do_oop(oop* o) = 0;
  virtual void do_oop_v(oop* o) { do_oop(o); }
  virtual void do_oop(narrowOop* o) = 0;
  virtual void do_oop_v(narrowOop* o) { do_oop(o); }
};
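
// Illustrative sketch only (not part of this interface): a concrete
// OopClosure overrides both do_oop variants, e.g. a hypothetical closure
// that counts the reference fields it is applied to:
//
//   class CountingOopClosure : public OopClosure {
//     size_t _count;
//    public:
//     CountingOopClosure() : _count(0) {}
//     virtual void do_oop(oop* p)       { _count++; }
//     virtual void do_oop(narrowOop* p) { _count++; }
//     size_t count() const              { return _count; }
//   };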

// ExtendedOopClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is factored out into a separate type so as
// not to pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 public:
  ReferenceProcessor* _ref_processor;
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : OopClosure(), _ref_processor(NULL) { }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  // 1) do_klass on the header klass pointer.
  // 2) do_klass on the klass pointer in the mirrors.
  // 3) do_class_loader_data on the class loader data in class loaders.
  //
  // The virtual functions (without suffix) and the non-virtual functions
  // (with the _nv suffix) need to be updated together, or else the
  // devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safety, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata. Currently,
  // only CMS needs these.

  virtual bool do_metadata() { return do_metadata_nv(); }
  bool do_metadata_v()       { return do_metadata(); }
  bool do_metadata_nv()      { return false; }

  virtual void do_klass(Klass* k)   { do_klass_nv(k); }
  void do_klass_v(Klass* k)         { do_klass(k); }
  void do_klass_nv(Klass* k)        { ShouldNotReachHere(); }

  virtual void do_class_loader_data(ClassLoaderData* cld) { ShouldNotReachHere(); }

  // Controls how prefetching is done for invocations of this closure.
  Prefetch::style prefetch_style() { // Note that this is non-virtual.
    return Prefetch::do_none;
  }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }
};
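
// Illustrative sketch only (not part of this interface): a hypothetical
// metadata-walking subclass keeps the virtual and _nv versions in sync by
// letting each virtual function forward to its _nv counterpart:
//
//   class MyMetadataOopClosure : public ExtendedOopClosure {
//    public:
//     virtual void do_oop(oop* p)       { /* process the reference */ }
//     virtual void do_oop(narrowOop* p) { /* process the reference */ }
//
//     bool do_metadata_nv()             { return true; }
//     virtual bool do_metadata()        { return do_metadata_nv(); }
//     void do_klass_nv(Klass* k)        { /* process the klass */ }
//     virtual void do_klass(Klass* k)   { do_klass_nv(k); }
//   };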

// Wrapper closure only used to implement oop_iterate_no_header().
class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
  OopClosure* _wrapped_closure;
 public:
  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
  // Warning: this calls the virtual version do_oop in the wrapped closure.
  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }

  void do_oop(oop* p)          { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p); }
  void do_oop(narrowOop* p)    { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p); }
};

class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};

class CLDClosure : public Closure {
 public:
  virtual void do_cld(ClassLoaderData* cld) = 0;
};

class KlassToOopClosure : public KlassClosure {
  OopClosure* _oop_closure;
 public:
  KlassToOopClosure(OopClosure* oop_closure) : _oop_closure(oop_closure) {}
  virtual void do_klass(Klass* k);
};

class CLDToOopClosure : public CLDClosure {
  OopClosure* _oop_closure;
  KlassToOopClosure _klass_closure;
  bool _must_claim_cld;

 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _klass_closure(oop_closure),
      _must_claim_cld(must_claim_cld) {}

  void do_cld(ClassLoaderData* cld);
};

// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};
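
// Illustrative sketch only (not part of this interface): a hypothetical
// ObjectClosure that sums the sizes (in words) of the objects it visits:
//
//   class ObjectSizeClosure : public ObjectClosure {
//     size_t _words;
//    public:
//     ObjectSizeClosure() : _words(0) {}
//     virtual void do_object(oop obj) { _words += obj->size(); }
//     size_t words() const            { return _words; }
//   };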


class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};

// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration.
class ObjectToOopClosure: public ObjectClosure {
  ExtendedOopClosure* _cl;
public:
  void do_object(oop obj);
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};

// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};

// The following are used in CompactibleFreeListSpace and
// ConcurrentMarkSweepGeneration.

// Blk closure (abstract class)
class BlkClosure : public StackObj {
 public:
  virtual size_t do_blk(HeapWord* addr) = 0;
};

// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};

// SpaceClosure is used for iterating over spaces

class Space;
class CompactibleSpace;

class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};

class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};


// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks

class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};


class MarkingCodeBlobClosure : public CodeBlobClosure {
 public:
  // Called for each code blob, but at most once per unique blob.
  virtual void do_newly_marked_nmethod(nmethod* nm) = 0;

  virtual void do_code_blob(CodeBlob* cb);
    // = { if (!nmethod(cb)->test_set_oops_do_mark())  do_newly_marked_nmethod(cb); }

  class MarkScope : public StackObj {
  protected:
    bool _active;
  public:
    MarkScope(bool activate = true);
      // = { if (active) nmethod::oops_do_marking_prologue(); }
    ~MarkScope();
      // = { if (active) nmethod::oops_do_marking_epilogue(); }
  };
};
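
// Illustrative sketch only (not part of this interface): a code-cache or
// thread-stack walk is typically bracketed by a MarkScope so that the
// nmethod mark bits are set up and torn down exactly once per iteration:
//
//   {
//     MarkingCodeBlobClosure::MarkScope scope(true /* activate */);
//     ... apply a MarkingCodeBlobClosure to the relevant code blobs ...
//   }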


// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure: public MarkingCodeBlobClosure {
  OopClosure* _cl;
  bool _do_marking;
public:
  virtual void do_newly_marked_nmethod(nmethod* cb);
    // = { cb->oops_do(_cl); }
  virtual void do_code_blob(CodeBlob* cb);
    // = { if (_do_marking)  super::do_code_blob(cb); else cb->oops_do(_cl); }
  CodeBlobToOopClosure(OopClosure* cl, bool do_marking)
    : _cl(cl), _do_marking(do_marking) {}
};



// MonitorClosure is used for iterating over monitors in the monitors cache

class ObjectMonitor;

class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};

// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  virtual void do_void();
};


// YieldClosure is intended for use by iteration loops
// to incrementalize their work, allowing interleaving
// of an interruptible task so as to allow other
// threads to run (which may not otherwise be able to access
// exclusive resources, for instance). Additionally, the
// closure also allows for aborting an ongoing iteration
// by means of checking the return value from the polling
// call.
class YieldClosure : public StackObj {
 public:
  virtual bool should_return() = 0;
};
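
// Illustrative sketch only (not part of this interface): an incremental
// iteration typically polls the closure between units of work and yields
// or aborts when should_return() answers true:
//
//   while (has_more_work()) {          // hypothetical loop
//     do_one_unit_of_work();
//     if (yield_closure->should_return()) {
//       return;                        // let other threads run; resume later
//     }
//   }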

// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag.  If reading, then compare the tag against
  // the passed-in value and fail if they don't match.  This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
};
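
// Illustrative sketch only (not part of this interface): a single routine
// can both write and later read back a data section by driving it through
// this closure; do_tag() guards the section boundaries:
//
//   void serialize_my_data(SerializeClosure* soc) {   // hypothetical helper
//     soc->do_tag(0xCAFE);                 // written when dumping,
//                                          // verified when restoring
//     soc->do_ptr((void**)&_some_pointer); // hypothetical field
//     soc->do_region((u_char*)_buffer, _buffer_size);
//   }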

class SymbolClosure : public StackObj {
 public:
  virtual void do_symbol(Symbol**) = 0;

  // Clear LSB in symbol address; it can be set by CPSlot.
  static Symbol* load_symbol(Symbol** p) {
    return (Symbol*)(intptr_t(*p) & ~1);
  }

  // Store symbol, adjusting new pointer if the original pointer was adjusted
  // (symbol references in constant pool slots have their LSB set to 1).
  static void store_symbol(Symbol** p, Symbol* sym) {
    *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
  }
};

#endif // SHARE_VM_MEMORY_ITERATOR_HPP