/*
 * Copyright (c) 1997, 2019, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_GC_SHARED_GENERATION_HPP
#define SHARE_GC_SHARED_GENERATION_HPP

#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                      - abstract base class
// - DefNewGeneration              - allocation area (copy collected)
// - CardGeneration                - abstract class adding offset array behavior
//   - TenuredGeneration           - tenured (old object) space (markSweepCompact)
//
// The system configuration currently allowed is:
//
//   DefNewGeneration + TenuredGeneration
//

class DefNewGeneration;
class GCMemoryManager;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class FastScanClosure;
class GenCollectedHeap;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another. It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
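// Illustrative sketch, not part of the interface above: walking a scratch
// list handed out via contribute_scratch() (declared below). The usable
// payload of each block is its "num_words" minus the two header words,
// assuming word-sized "next" and "num_words" fields as noted above.
//
//   for (ScratchBlock* b = list; b != NULL; b = b->next) {
//     HeapWord* payload = b->scratch_space;       // first usable word
//     size_t payload_words = b->num_words - 2;    // exclude the two header words
//     // use [payload, payload + payload_words) as scratch space ...
//   }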
class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
                               // used region at some specific point during collection.

  GCMemoryManager* _gc_manager;

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code. Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // ("Weak") Reference processing support
  SpanSubjectToDiscoveryClosure _span_based_discoverer;
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iterations; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

 public:
  // The set of possible generation kinds.
  enum Name {
    DefNew,
    MarkSweepCompact,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM32_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };

  // Allocate and initialize ("weak") refs processing support.
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true; }
  virtual bool refs_discovery_is_mt() const { return false; }

  // Space inquiries (results in bytes)
  size_t initial_size();
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion. (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
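  // Illustrative sketch, assuming a typical caller (the helper below is
  // hypothetical, not part of this class): a young generation can gate a
  // minor collection on whether a worst-case promotion of its entire used
  // size into the old generation is likely to succeed:
  //
  //   bool minor_gc_is_safe(Generation* young, Generation* old_gen) {
  //     return old_gen->promotion_attempt_is_safe(young->used());
  //   }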
  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity. It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC. Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region() { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into the committed areas in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  // Returns "TRUE" iff "p" points into the reserved area of the generation.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size. Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }
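  // Worked example for the overflow check above: the limit guards the later
  // conversion of "word_size" to a byte size. On a 64-bit JVM,
  // BitsPerSize_t == 64 and LogHeapWordSize == 3 (8-byte words), so
  //
  //   overflow_limit = (size_t)1 << (64 - 3) == 2^61 words;
  //
  // any word count below this still fits in a size_t when shifted left by
  // LogHeapWordSize to obtain bytes.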
  // Allocate and return a block of the requested size, or return "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation. (More precisely, this means the style of allocation that
  // increments "*top_addr()" with a CAS.) (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
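  // Illustrative sketch of the lock-free style referred to above (hedged:
  // the argument order of Atomic::cmpxchg has changed across JDK releases;
  // it is shown here as (dest, expected, new)):
  //
  //   HeapWord* volatile* top = gen->top_addr();
  //   HeapWord** end = gen->end_addr();
  //   HeapWord* old_top;
  //   do {
  //     old_top = *top;
  //     if (old_top + word_size > *end) return NULL;   // region exhausted
  //   } while (Atomic::cmpxchg(top, old_top, old_top + word_size) != old_top);
  //   return old_top;  // [old_top, old_top + word_size) now belongs to the caller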
  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t tlab_used() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation. Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz". If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*). Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num, oop obj, markWord m, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset. Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset. Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // Returns "true" iff collect() should subsequently be called on this
  // generation. See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool full,
                              size_t word_size,
                              bool is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed. Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If full is true attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t word_size,
                       bool is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size". If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block). If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;
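  // Illustrative sketch, an assumption about how a caller might tie the
  // hooks above together rather than anything this class mandates (the
  // helper below is hypothetical): on a failed allocation, escalate from a
  // collection attempt to expansion.
  //
  //   HeapWord* satisfy_allocation(Generation* gen, size_t word_size, bool is_tlab) {
  //     if (gen->should_collect(false /* full */, word_size, is_tlab)) {
  //       gen->collect(false, false /* clear_all_soft_refs */, word_size, is_tlab);
  //       HeapWord* result = gen->allocate(word_size, is_tlab);
  //       if (result != NULL) return result;
  //     }
  //     return gen->expand_and_allocate(word_size, is_tlab);
  //   }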
  // Some generations may require some cleanup or preparation actions before
  // allowing a collection. The default is to do nothing.
  virtual void gc_prologue(bool full) {}

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {}

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {}

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again. The default is to do nothing.
  virtual void ensure_parsability() {}

  // Generations may keep statistics about collection. This method
  // updates those statistics. current_generation is the generation
  // that was most recently collected. This allows the generation to
  // decide what statistics are valid to collect. For example, the
  // generation can decide to gather the amount of promoted data if
  // the collection of the young generation has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(Generation* current_generation, bool full) {}

#if INCLUDE_SERIALGC
  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }
#endif

  // Support for CMS's rescan. In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors. A NULL value indicates to the client that
  // no data recording is expected by the provider. The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  virtual void sample_eden_chunk() {}

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {}

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections. For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks"; that
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. If
  // the target is not the requestor, no gc actions will be required
  // of the target. The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list. The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}

  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {}

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(OopIterateClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations. Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation. [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved. [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "block" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
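  // Illustrative sketch, not part of this interface: the three functions
  // above are what make a generation linearly parsable. Assuming "bottom"
  // and "top" bound the used part of a space in generation "gen", a walk
  // that visits only the objects (skipping non-object filler chunks) looks
  // like:
  //
  //   HeapWord* p = bottom;
  //   while (p < top) {
  //     if (gen->block_is_obj(p)) {
  //       visit(cast_to_oop(p));        // "visit" is a hypothetical callback
  //     }
  //     p += gen->block_size(p);        // advance to the next chunk
  //   }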
  void print_heap_change(size_t prev_used) const;

  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify() = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }

  GCMemoryManager* gc_manager() const {
    assert(_gc_manager != NULL, "not initialized yet");
    return _gc_manager;
  }

  void set_gc_manager(GCMemoryManager* gc_manager) {
    _gc_manager = gc_manager;
  }
};

#endif // SHARE_GC_SHARED_GENERATION_HPP