/*
 * Copyright (c) 1997, 2015, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ITERATOR_HPP
#define SHARE_VM_MEMORY_ITERATOR_HPP

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "utilities/top.hpp"

// Forward declarations: these types are only referenced through pointers
// in this header, so full definitions are not needed here.
class CodeBlob;
class nmethod;
class ReferenceProcessor;
class DataLayout;
class KlassClosure;
class ClassLoaderData;

// The following classes are C++ `closures` for iterating over objects, roots and spaces

// Root of the closure hierarchy. Deriving from StackObj means closures are
// intended to be allocated on the stack (scoped to an iteration), not heap-allocated.
class Closure : public StackObj { }

// OopClosure is used for iterating through references to Java objects.
// Subclasses must handle both field encodings: full-width (oop*) and
// compressed (narrowOop*) oop fields.
class OopClosure : public Closure {
 public:
  virtual void do_oop(oop* o) = 0;
  virtual void do_oop(narrowOop* o) = 0;
};

// ExtendedOopClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 private:
  ReferenceProcessor* _ref_processor;

 protected:
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : _ref_processor(NULL) { }
  ~ExtendedOopClosure() { }

  // Protected: only subclasses may swap the reference processor after construction.
  void set_ref_processor_internal(ReferenceProcessor* rp) { _ref_processor = rp; }

 public:
  ReferenceProcessor* ref_processor() const { return _ref_processor; }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  //   1) do_klass on the header klass pointer.
  //   2) do_klass on the klass pointer in the mirrors.
  //   3) do_cld   on the class loader data in class loaders.
  //
  // The virtual (without suffix) and the non-virtual (with _nv suffix) need
  // to be updated together, or else the devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safeness, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata.
  // Currently, only CMS and G1 need these.

  bool do_metadata_nv()      { return false; }
  virtual bool do_metadata() { return do_metadata_nv(); }

  // The _nv defaults trap: a closure whose do_metadata() answers true is
  // expected to override these (see the clutter/safeness note above).
  void do_klass_nv(Klass* k)      { ShouldNotReachHere(); }
  virtual void do_klass(Klass* k) { do_klass_nv(k); }

  void do_cld_nv(ClassLoaderData* cld)      { ShouldNotReachHere(); }
  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }
  virtual bool apply_to_weak_ref_discovered_field() { return false; }

#ifdef ASSERT
  // Default verification of each visited oop field.
  template <typename T> void verify(T* p);

  // Can be used by subclasses to turn off the default verification of oop fields.
  virtual bool should_verify_oops() { return true; }
#endif
};

// Wrapper closure only used to implement oop_iterate_no_header().
class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
  OopClosure* _wrapped_closure;
 public:
  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
  // Warning: this calls the virtual version do_oop in the wrapped closure.
  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }

  // The virtual entry points still forward after asserting, so release
  // builds keep working even if a caller bypasses the _nv versions.
  void do_oop(oop* p)       { assert(false, "Only the _nv versions should be used");
                              _wrapped_closure->do_oop(p); }
  void do_oop(narrowOop* p) { assert(false, "Only the _nv versions should be used");
                              _wrapped_closure->do_oop(p);}
};

// Abstract closure applied to each Klass during metadata iteration.
class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};

// Abstract closure applied to each ClassLoaderData.
class CLDClosure : public Closure {
 public:
  virtual void do_cld(ClassLoaderData* cld) = 0;
};

// Adapter: visits a Klass by applying an OopClosure to the oops it embeds.
class KlassToOopClosure : public KlassClosure {
  friend class MetadataAwareOopClosure;
  friend class MetadataAwareOopsInGenClosure;

  OopClosure* _oop_closure;

  // Used when _oop_closure couldn't be set in an initialization list.
  void initialize(OopClosure* oop_closure) {
    assert(_oop_closure == NULL, "Should only be called once");
    _oop_closure = oop_closure;
  }

 public:
  KlassToOopClosure(OopClosure* oop_closure = NULL) : _oop_closure(oop_closure) {}

  virtual void do_klass(Klass* k);
};

// Adapter: visits a ClassLoaderData by applying an OopClosure (via an
// embedded KlassToOopClosure built over the same OopClosure).
class CLDToOopClosure : public CLDClosure {
  OopClosure*       _oop_closure;
  KlassToOopClosure _klass_closure;
  bool              _must_claim_cld;

 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _klass_closure(oop_closure),
      _must_claim_cld(must_claim_cld) {}

  void do_cld(ClassLoaderData* cld);
};

// Like CLDToOopClosure, but takes a caller-supplied KlassClosure instead of
// synthesizing one from the OopClosure.
class CLDToKlassAndOopClosure : public CLDClosure {
  friend class G1CollectedHeap;
 protected:
  OopClosure*   _oop_closure;
  KlassClosure* _klass_closure;
  bool          _must_claim_cld;
 public:
  CLDToKlassAndOopClosure(KlassClosure* klass_closure,
                          OopClosure* oop_closure,
                          bool must_claim_cld) :
      _oop_closure(oop_closure),
      _klass_closure(klass_closure),
      _must_claim_cld(must_claim_cld) {}
  void do_cld(ClassLoaderData* cld);
};

// The base class for all concurrent marking closures,
// that participates in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
class MetadataAwareOopClosure: public ExtendedOopClosure {
  KlassToOopClosure _klass_closure;

 public:
  // Both constructors wire the embedded KlassToOopClosure back to `this`
  // (initialize() is used because `this` is not available in the member
  // initialization list of _klass_closure).
  MetadataAwareOopClosure() : ExtendedOopClosure() {
    _klass_closure.initialize(this);
  }
  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) {
    _klass_closure.initialize(this);
  }

  // Metadata IS walked by this closure family; both the virtual and the
  // _nv versions must agree (see ExtendedOopClosure's devirtualization note).
  bool do_metadata_nv()      { return true; }
  virtual bool do_metadata() { return do_metadata_nv(); }

  void do_klass_nv(Klass* k);
  virtual void do_klass(Klass* k) { do_klass_nv(k); }

  void do_cld_nv(ClassLoaderData* cld);
  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
};

// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};


// Predicate closure over objects (e.g. "is this object live?").
class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};

// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration.
class ObjectToOopClosure: public ObjectClosure {
  ExtendedOopClosure* _cl;
 public:
  void do_object(oop obj);
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};

// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class ObjectClosureCareful : public ObjectClosure {
 public:
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};

// The following are used in CompactibleFreeListSpace and
// ConcurrentMarkSweepGeneration.

// Blk closure (abstract class)
class BlkClosure : public StackObj {
 public:
  virtual size_t do_blk(HeapWord* addr) = 0;
};

// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  // The plain do_blk entry point is deliberately disabled; callers must use
  // the "careful" variant below.
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};

// SpaceClosure is used for iterating over spaces

class Space;
class CompactibleSpace;

class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};

class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};


// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks

class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};

// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure : public CodeBlobClosure {
  OopClosure* _cl;
  bool _fix_relocations;
 protected:
  void do_nmethod(nmethod* nm);
 public:
  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
  virtual void do_code_blob(CodeBlob* cb);

  bool fix_relocations() const { return _fix_relocations; }
  // Named constant callers can pass as the fix_relocations constructor argument.
  const static bool FixRelocations = true;
};

class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
 public:
  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
  // Called for each code blob, but at most once per unique blob.

  virtual void do_code_blob(CodeBlob* cb);
};

// MonitorClosure is used for iterating over monitors in the monitors cache

class ObjectMonitor;

class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};

// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  virtual void do_void();
};


// YieldClosure is intended for use by iteration loops
// to incrementalize their work, allowing interleaving
// of an interruptable task so as to allow other
// threads to run (which may not otherwise be able to access
// exclusive resources, for instance).  Additionally, the
// closure also allows for aborting an ongoing iteration
// by means of checking the return value from the polling
// call.
class YieldClosure : public StackObj {
 public:
  virtual bool should_return() = 0;
};

// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
 public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag.  If reading, then compare the tag against
  // the passed in value and fail if they don't match.  This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;
};

class SymbolClosure : public StackObj {
 public:
  virtual void do_symbol(Symbol**) = 0;

  // Clear LSB in symbol address; it can be set by CPSlot.
  static Symbol* load_symbol(Symbol** p) {
    return (Symbol*)(intptr_t(*p) & ~1);
  }

  // Store symbol, adjusting new pointer if the original pointer was adjusted
  // (symbol references in constant pool slots have their LSB set to 1).
  static void store_symbol(Symbol** p, Symbol* sym) {
    *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
  }
};

// The two class template specializations are used to dispatch calls
// to the ExtendedOopClosure functions.  If use_non_virtual_call is true,
// the non-virtual versions are called (E.g. do_oop_nv), otherwise the
// virtual versions are called (E.g. do_oop).

template <bool use_non_virtual_call>
class Devirtualizer {};

// Dispatches to the non-virtual functions.
template <> class Devirtualizer<true> {
 public:
  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
  template <class OopClosureType> static void do_klass(OopClosureType* closure, Klass* k);
  template <class OopClosureType> static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
  template <class OopClosureType> static bool do_metadata(OopClosureType* closure);
};

// Dispatches to the virtual functions.
template <> class Devirtualizer<false> {
 public:
  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
  template <class OopClosureType> static void do_klass(OopClosureType* closure, Klass* k);
  template <class OopClosureType> static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
  template <class OopClosureType> static bool do_metadata(OopClosureType* closure);
};

#endif // SHARE_VM_MEMORY_ITERATOR_HPP