/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_GENERATION_HPP
#define SHARE_VM_GC_SHARED_GENERATION_HPP

#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                        - abstract base class
// - DefNewGeneration                - allocation area (copy collected)
//   - ParNewGeneration              - a DefNewGeneration that is collected by
//                                     several threads
// - CardGeneration                  - abstract class adding offset array behavior
//   - TenuredGeneration             - tenured (old object) space (markSweepCompact)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                     Boehm-Demers-Shenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration
//
//   ParNewGeneration + ConcurrentMarkSweepGeneration
//

class DefNewGeneration;
class GCMemoryManager;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another. It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
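
// Illustrative sketch (not part of this header): summing the free words
// offered by a scratch list, such as one built up by contribute_scratch()
// below. Uses only the fields declared above; note that "num_words" counts
// from "this", so it includes the two header words.
//
//   size_t total_scratch_words(const ScratchBlock* list) {
//     size_t total = 0;
//     for (const ScratchBlock* b = list; b != NULL; b = b->next) {
//       total += b->num_words;  // includes the two header words
//     }
//     return total;
//   }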

class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc;      // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
                               // used region at some specific point during collection.

  GCMemoryManager* _memory_manager;

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code. Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size, GCMemoryManager* memory_manager);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iteration; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

 public:
  // The set of possible generation kinds.
  enum Name {
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM32_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };
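
  // Illustrative note (not part of this header): with LogOfGenGrain = 16,
  // GenGrain is 64 KiB (128 KiB on ARM32), so generation sizes and
  // boundaries can be maintained with simple mask arithmetic.
  // "align_up_to_gen_grain" is a hypothetical helper:
  //
  //   size_t align_up_to_gen_grain(size_t bytes) {
  //     return (bytes + GenGrain - 1) & ~((size_t)GenGrain - 1);
  //   }
  //   // e.g. 100 KiB rounds up to 128 KiB when GenGrain is 64 KiB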

  GCMemoryManager* memory_manager() {
    return _memory_manager;
  }

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true;  }
  virtual bool refs_discovery_is_mt()     const { return false; }

  // Space inquiries (results in bytes)
  size_t initial_size();
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion. (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;

  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity. It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC. Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region() { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into the committed areas in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product JVMs, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  // Returns "TRUE" iff "p" points into the reserved area of the generation.
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size. Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }
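
  // Illustrative note (not part of this header): "overflow_limit" above is
  // the first word count whose byte size no longer fits in a size_t. On a
  // 64-bit platform (BitsPerSize_t == 64, LogHeapWordSize == 3):
  //
  //   size_t overflow_limit = (size_t)1 << (64 - 3);  // 2^61 words
  //   // 2^61 words * 8 bytes/word == 2^64 bytes, which would wrap size_t,
  //   // so should_allocate() rejects any word_size >= 2^61.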

  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation. (More precisely, this means the style of allocation that
  // increments *top_addr() with a CAS.) (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area. (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t tlab_used() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation. Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz". If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*). Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset. Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset. Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}
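
  // Illustrative sketch (not part of this header): one way an overriding
  // generation might satisfy the promote() contract above, using this
  // class's allocate() entry point ("MyGeneration" is hypothetical):
  //
  //   oop MyGeneration::promote(oop obj, size_t obj_size) {
  //     HeapWord* result = allocate(obj_size, false /* not a tlab */);
  //     if (result == NULL) {
  //       return NULL;  // caller must cope with promotion failure
  //     }
  //     Copy::aligned_disjoint_words((HeapWord*)obj, result, obj_size);
  //     return oop(result);
  //   }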

  // Returns "true" iff collect() should subsequently be called on this
  // generation. See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed. Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If full is true, attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size". If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block). If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection. The default is to do nothing.
  virtual void gc_prologue(bool full) {}

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {}

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {}

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again. The default is to do nothing.
  virtual void ensure_parsability() {}

  // Time (in ms) when we were last collected, or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // Both _time_of_last_gc and now are set using a time source
    // that guarantees monotonically non-decreasing values provided
    // the underlying platform provides such a source. So we still
    // have to guard against non-monotonicity.
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }
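
  // Illustrative sketch (not part of this header): a periodic-collection
  // policy might combine the two methods above; "os_now_millis" and
  // "interval_millis" are hypothetical names.
  //
  //   jlong now = os_now_millis();
  //   if (now - gen->time_of_last_gc(now) > interval_millis) {
  //     // ... trigger a collection, after which the collector calls ...
  //     gen->update_time_of_last_gc(os_now_millis());
  //   }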

  // Generations may keep statistics about collection. This method
  // updates those statistics. current_generation is the generation
  // that was most recently collected. This allows the generation to
  // decide what statistics are valid to collect. For example, the
  // generation can decide to gather the amount of promoted data if
  // the collection of the young generation has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(Generation* current_generation, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }

  // Support for CMS's rescan. In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors. A NULL value indicates to the client that
  // no data recording is expected by the provider. The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  virtual void sample_eden_chunk() {}

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {}

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections. For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks". That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function returns "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->apply" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization. (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)          \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                   \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL
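
  // Illustrative sketch (not part of this header): a collector typically
  // drives the since-save-marks iteration to a fixpoint, because applying
  // the closure may allocate (e.g. promote) further objects here:
  //
  //   do {
  //     gen->oop_since_save_marks_iterate_v(&cl);  // ends by calling save_marks()
  //   } while (!gen->no_allocs_since_save_marks());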

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space. If
  // the target is not the requestor, no gc actions will be required
  // of the target. The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list. The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}

  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {}

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each. An object is safe if its references point to other objects in
  // the heap. This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations. Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation. [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved. [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr". We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;
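
  // Illustrative sketch (not part of this header): the block methods above
  // support a linear walk over a generation's active area; "bottom", "top"
  // and "do_object" are hypothetical:
  //
  //   HeapWord* cur = bottom;
  //   while (cur < top) {
  //     if (gen->block_is_obj(cur)) {
  //       do_object(oop(cur));          // visit the object
  //     }
  //     cur += gen->block_size(cur);    // advance to the next chunk
  //   }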

  void print_heap_change(size_t prev_used) const;

  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify() = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

#endif // SHARE_VM_GC_SHARED_GENERATION_HPP