/*
 * Copyright (c) 1997, 2018, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_ITERATOR_HPP
#define SHARE_VM_MEMORY_ITERATOR_HPP

#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "oops/oopsHierarchy.hpp"

// Forward declarations so closure interfaces below can take these by pointer
// without pulling in their full headers.
class CodeBlob;
class nmethod;
class ReferenceProcessor;
class DataLayout;
class KlassClosure;
class ClassLoaderData;
class Symbol;

// The following classes are C++ `closures` for iterating over objects, roots and spaces

// Common base of all closures. Derives from StackObj: closures are intended
// to be allocated on the stack, not on the heap.
class Closure : public StackObj { };

// OopClosure is used for iterating through references to Java objects.
// The two do_oop overloads cover both full-width (oop*) and compressed
// (narrowOop*) reference locations.
class OopClosure : public Closure {
 public:
  virtual void do_oop(oop* o) = 0;
  virtual void do_oop(narrowOop* o) = 0;
};

// An OopClosure that ignores every location it is applied to.
// Stateless, so a single shared instance suffices.
class DoNothingClosure : public OopClosure {
 public:
  virtual void do_oop(oop* p)       {}
  virtual void do_oop(narrowOop* p) {}
};
// Shared instance usable wherever an OopClosure is required but no work is needed.
extern DoNothingClosure do_nothing_cl;

// ExtendedOopClosure adds extra code to be run during oop iterations.
// This is needed by the GC and is extracted to a separate type to not
// pollute the OopClosure interface.
class ExtendedOopClosure : public OopClosure {
 private:
  ReferenceProcessor* _ref_processor;

 protected:
  ExtendedOopClosure(ReferenceProcessor* rp) : _ref_processor(rp) { }
  ExtendedOopClosure() : _ref_processor(NULL) { }
  // Protected, non-virtual destructor: callers never delete closures
  // through a base-class pointer.
  ~ExtendedOopClosure() { }

  void set_ref_processor_internal(ReferenceProcessor* rp) { _ref_processor = rp; }

 public:
  ReferenceProcessor* ref_processor() const { return _ref_processor; }

  // Iteration of InstanceRefKlasses differs depending on the closure,
  // the below enum describes the different alternatives.
  enum ReferenceIterationMode {
    DO_DISCOVERY,                // Apply closure and discover references
    DO_DISCOVERED_AND_DISCOVERY, // Apply closure to discovered field and do discovery
    DO_FIELDS                    // Apply closure to all fields
  };

  // The default iteration mode is to do discovery.
  virtual ReferenceIterationMode reference_iteration_mode() { return DO_DISCOVERY; }

  // If the do_metadata functions return "true",
  // we invoke the following when running oop_iterate():
  //
  //   1) do_klass on the header klass pointer.
  //   2) do_klass on the klass pointer in the mirrors.
  //   3) do_cld   on the class loader data in class loaders.
  //
  // The virtual (without suffix) and the non-virtual (with _nv suffix) need
  // to be updated together, or else the devirtualization will break.
  //
  // Providing default implementations of the _nv functions unfortunately
  // removes the compile-time safeness, but reduces the clutter for the
  // ExtendedOopClosures that don't need to walk the metadata.
  // Currently, only CMS and G1 need these.

  bool do_metadata_nv()      { return false; }
  virtual bool do_metadata() { return do_metadata_nv(); }

  // The metadata defaults trap: subclasses that answer true from
  // do_metadata() must override these (see MetadataAwareOopClosure below).
  void do_klass_nv(Klass* k)           { ShouldNotReachHere(); }
  virtual void do_klass(Klass* k)      { do_klass_nv(k); }

  void do_cld_nv(ClassLoaderData* cld) { ShouldNotReachHere(); }
  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }

  // True iff this closure may be safely applied more than once to an oop
  // location without an intervening "major reset" (like the end of a GC).
  virtual bool idempotent() { return false; }

#ifdef ASSERT
  // Default verification of each visited oop field.
  template <typename T> void verify(T* p);

  // Can be used by subclasses to turn off the default verification of oop fields.
  virtual bool should_verify_oops() { return true; }
#endif
};

// Wrapper closure only used to implement oop_iterate_no_header().
class NoHeaderExtendedOopClosure : public ExtendedOopClosure {
  OopClosure* _wrapped_closure;
 public:
  NoHeaderExtendedOopClosure(OopClosure* cl) : _wrapped_closure(cl) {}
  // Warning: this calls the virtual version do_oop in the wrapped closure.
  void do_oop_nv(oop* p)       { _wrapped_closure->do_oop(p); }
  void do_oop_nv(narrowOop* p) { _wrapped_closure->do_oop(p); }

  // The virtual entry points still forward in product builds, but assert in
  // debug builds: only the devirtualized _nv paths are expected to be taken.
  void do_oop(oop* p)          { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p); }
  void do_oop(narrowOop* p)    { assert(false, "Only the _nv versions should be used");
                                 _wrapped_closure->do_oop(p);}
#ifdef ASSERT
  bool should_verify_oops()    { return false; }
#endif
};

// Closure applied to each Klass.
class KlassClosure : public Closure {
 public:
  virtual void do_klass(Klass* k) = 0;
};

// Closure applied to each ClassLoaderData.
class CLDClosure : public Closure {
 public:
  virtual void do_cld(ClassLoaderData* cld) = 0;
};


// Adapter: presents an OopClosure as a CLDClosure (implementation in the .cpp file).
class CLDToOopClosure : public CLDClosure {
  OopClosure* _oop_closure;
  bool        _must_claim_cld;

 public:
  CLDToOopClosure(OopClosure* oop_closure, bool must_claim_cld = true) :
      _oop_closure(oop_closure),
      _must_claim_cld(must_claim_cld) {}

  void do_cld(ClassLoaderData* cld);
};

// The base class for all concurrent marking closures,
// that participates in class unloading.
// It's used to proxy through the metadata to the oops defined in them.
// Overrides the trapping metadata defaults of ExtendedOopClosure and
// answers true from do_metadata().
class MetadataAwareOopClosure: public ExtendedOopClosure {

 public:
  MetadataAwareOopClosure() : ExtendedOopClosure() { }
  MetadataAwareOopClosure(ReferenceProcessor* rp) : ExtendedOopClosure(rp) { }

  bool do_metadata_nv()      { return true; }
  virtual bool do_metadata() { return do_metadata_nv(); }

  void do_klass_nv(Klass* k);
  virtual void do_klass(Klass* k) { do_klass_nv(k); }

  void do_cld_nv(ClassLoaderData* cld);
  virtual void do_cld(ClassLoaderData* cld) { do_cld_nv(cld); }
};

// ObjectClosure is used for iterating through an object space

class ObjectClosure : public Closure {
 public:
  // Called for each object.
  virtual void do_object(oop obj) = 0;
};


// Predicate closure over objects.
class BoolObjectClosure : public Closure {
 public:
  virtual bool do_object_b(oop obj) = 0;
};

// Predicate that answers true for every object.
class AlwaysTrueClosure: public BoolObjectClosure {
 public:
  bool do_object_b(oop p) { return true; }
};

// Predicate that answers false for every object.
class AlwaysFalseClosure : public BoolObjectClosure {
 public:
  bool do_object_b(oop p) { return false; }
};

// Applies an oop closure to all ref fields in objects iterated over in an
// object iteration.
class ObjectToOopClosure: public ObjectClosure {
  ExtendedOopClosure* _cl;
 public:
  void do_object(oop obj);
  ObjectToOopClosure(ExtendedOopClosure* cl) : _cl(cl) {}
};

// A version of ObjectClosure that is expected to be robust
// in the face of possibly uninitialized objects.
// The _careful variants return a size_t; the MemRegion overload restricts
// processing to the given region.
class ObjectClosureCareful : public ObjectClosure {
 public:
  virtual size_t do_object_careful_m(oop p, MemRegion mr) = 0;
  virtual size_t do_object_careful(oop p) = 0;
};

// The following are used in CompactibleFreeListSpace and
// ConcurrentMarkSweepGeneration.

// Blk closure (abstract class)
class BlkClosure : public StackObj {
 public:
  virtual size_t do_blk(HeapWord* addr) = 0;
};

// A version of BlkClosure that is expected to be robust
// in the face of possibly uninitialized objects.
class BlkClosureCareful : public BlkClosure {
 public:
  // The plain do_blk entry point must not be used on this hierarchy;
  // it traps unconditionally to force callers onto do_blk_careful.
  size_t do_blk(HeapWord* addr) {
    guarantee(false, "call do_blk_careful instead");
    return 0;
  }
  virtual size_t do_blk_careful(HeapWord* addr) = 0;
};

// SpaceClosure is used for iterating over spaces

class Space;
class CompactibleSpace;

class SpaceClosure : public StackObj {
 public:
  // Called for each space
  virtual void do_space(Space* s) = 0;
};

class CompactibleSpaceClosure : public StackObj {
 public:
  // Called for each compactible space
  virtual void do_space(CompactibleSpace* s) = 0;
};


// CodeBlobClosure is used for iterating through code blobs
// in the code cache or on thread stacks

class CodeBlobClosure : public Closure {
 public:
  // Called for each code blob.
  virtual void do_code_blob(CodeBlob* cb) = 0;
};

// Applies an oop closure to all ref fields in code blobs
// iterated over in an object iteration.
class CodeBlobToOopClosure : public CodeBlobClosure {
  OopClosure* _cl;
  bool        _fix_relocations;
 protected:
  // Applies _cl to the oops of a single nmethod (implementation in the .cpp file).
  void do_nmethod(nmethod* nm);
 public:
  // If fix_relocations(), then cl must copy objects to their new location immediately to avoid
  // patching nmethods with the old locations.
  CodeBlobToOopClosure(OopClosure* cl, bool fix_relocations) : _cl(cl), _fix_relocations(fix_relocations) {}
  virtual void do_code_blob(CodeBlob* cb);

  bool fix_relocations() const { return _fix_relocations; }
  // Named constant for readability at construction sites.
  const static bool FixRelocations = true;
};

class MarkingCodeBlobClosure : public CodeBlobToOopClosure {
 public:
  MarkingCodeBlobClosure(OopClosure* cl, bool fix_relocations) : CodeBlobToOopClosure(cl, fix_relocations) {}
  // Called for each code blob, but at most once per unique blob.

  virtual void do_code_blob(CodeBlob* cb);
};

// MonitorClosure is used for iterating over monitors in the monitors cache

class ObjectMonitor;

class MonitorClosure : public StackObj {
 public:
  // called for each monitor in cache
  virtual void do_monitor(ObjectMonitor* m) = 0;
};

// A closure that is applied without any arguments.
class VoidClosure : public StackObj {
 public:
  // I would have liked to declare this a pure virtual, but that breaks
  // in mysterious ways, for unknown reasons.
  virtual void do_void();
};


// YieldClosure is intended for use by iteration loops
// to incrementalize their work, allowing interleaving
// of an interruptible task so as to allow other
// threads to run (which may not otherwise be able to access
// exclusive resources, for instance). Additionally, the
// closure also allows for aborting an ongoing iteration
// by means of checking the return value from the polling
// call.
class YieldClosure : public StackObj {
 public:
  virtual bool should_return() = 0;
};

// Abstract closure for serializing data (read or write).

class SerializeClosure : public Closure {
 public:
  // Return bool indicating whether closure implements read or write.
  virtual bool reading() const = 0;

  // Read/write the void pointer pointed to by p.
  virtual void do_ptr(void** p) = 0;

  // Read/write the 32-bit unsigned integer pointed to by p.
  virtual void do_u4(u4* p) = 0;

  // Read/write the region specified.
  virtual void do_region(u_char* start, size_t size) = 0;

  // Check/write the tag. If reading, then compare the tag against
  // the passed in value and fail if they don't match. This allows
  // for verification that sections of the serialized data are of the
  // correct length.
  virtual void do_tag(int tag) = 0;

  // Read/write the oop
  virtual void do_oop(oop* o) = 0;

  // Convenience inverse of reading().
  bool writing() {
    return !reading();
  }
};

class SymbolClosure : public StackObj {
 public:
  virtual void do_symbol(Symbol**) = 0;

  // Clear LSB in symbol address; it can be set by CPSlot.
  static Symbol* load_symbol(Symbol** p) {
    return (Symbol*)(intptr_t(*p) & ~1);
  }

  // Store symbol, adjusting new pointer if the original pointer was adjusted
  // (symbol references in constant pool slots have their LSB set to 1).
  static void store_symbol(Symbol** p, Symbol* sym) {
    *p = (Symbol*)(intptr_t(sym) | (intptr_t(*p) & 1));
  }
};

// The two class template specializations are used to dispatch calls
// to the ExtendedOopClosure functions. If use_non_virtual_call is true,
// the non-virtual versions are called (E.g. do_oop_nv), otherwise the
// virtual versions are called (E.g. do_oop).

// Primary template is intentionally empty; only the two bool
// specializations below are usable.
template <bool use_non_virtual_call>
class Devirtualizer {};

// Dispatches to the non-virtual functions.
template <> class Devirtualizer<true> {
 public:
  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
  template <class OopClosureType> static void do_klass(OopClosureType* closure, Klass* k);
  template <class OopClosureType> static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
  template <class OopClosureType> static bool do_metadata(OopClosureType* closure);
};

// Dispatches to the virtual functions.
template <> class Devirtualizer<false> {
 public:
  template <class OopClosureType, typename T> static void do_oop(OopClosureType* closure, T* p);
  template <class OopClosureType> static void do_klass(OopClosureType* closure, Klass* k);
  template <class OopClosureType> static void do_cld(OopClosureType* closure, ClassLoaderData* cld);
  template <class OopClosureType> static bool do_metadata(OopClosureType* closure);
};

#endif // SHARE_VM_MEMORY_ITERATOR_HPP