/*
 * Copyright (c) 1997, 2017, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_GC_SHARED_GENERATION_HPP
#define SHARE_VM_GC_SHARED_GENERATION_HPP

#include "gc/shared/collectorCounters.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "logging/log.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/universe.hpp"
#include "memory/virtualspace.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                        - abstract base class
// - DefNewGeneration                - allocation area (copy collected)
//   - ParNewGeneration              - a DefNewGeneration that is collected by
//                                     several threads
// - CardGeneration                  - abstract class adding offset array behavior
//   - TenuredGeneration             - tenured (old object) space (markSweepCompact)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                     Boehm-Demers-Schenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration
//
//   ParNewGeneration + ConcurrentMarkSweepGeneration
//

class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
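// Illustrative sketch (not part of the original header): how a client might
// consume a list of ScratchBlocks gathered via contribute_scratch() below.
// The helper name "total_scratch_words" is hypothetical; the layout
// arithmetic (two word-sized header fields, hence "num_words - 2" usable
// words in scratch_space) follows the comment above.
//
//   static size_t total_scratch_words(const ScratchBlock* list) {
//     size_t sum = 0;
//     for (const ScratchBlock* p = list; p != NULL; p = p->next) {
//       sum += p->num_words - 2;  // exclude the next/num_words header words
//     }
//     return sum;
//   }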
class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc;      // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region; // for collectors that want to "remember" a value for
                               // used region at some specific point during collection.

  GCMemoryManager* _gc_manager;

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code.  Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iteration; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl, uint n_threads);

 public:
  // The set of possible generation kinds.
  enum Name {
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM32_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true; }
  virtual bool refs_discovery_is_mt() const { return false; }

  // Space inquiries (results in bytes)
  size_t initial_size();
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion.  (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;
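  // Illustrative sketch (not part of the original header): a young generation
  // deciding whether a minor collection is safe typically asks the next older
  // generation whether a worst-case promotion of its entire used space is
  // likely to succeed.  The variable names below are hypothetical.
  //
  //   Generation* old_gen   = ...;  // the next older generation
  //   Generation* young_gen = ...;
  //   bool safe = old_gen->promotion_attempt_is_safe(young_gen->used());
  //   // If !safe, the caller may fall back to a full collection instead.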
  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC.  Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region() { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into the committed areas in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  /* Returns "TRUE" iff "p" points into the reserved area of the generation. */
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

  // Allocates and returns a block of the requested size, or returns "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;
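  // Illustrative note (not part of the original header): the overflow_limit
  // check in should_allocate() rejects word counts whose byte size would
  // overflow a size_t.  For example, on a 64-bit platform (assuming
  // BitsPerSize_t == 64 and LogHeapWordSize == 3):
  //
  //   size_t overflow_limit = (size_t)1 << (64 - 3);  // 2^61 words
  //   // word_size must satisfy 0 < word_size < 2^61, so that
  //   // word_size * HeapWordSize (== word_size << 3) fits in 64 bits.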
  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments "*top_addr()" with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord* volatile* top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }
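  // Illustrative sketch (not part of the original header) of the lock-free
  // bump-the-pointer style that supports_inline_contig_alloc() refers to.
  // This is a simplified sketch, not the actual compiled-code fast path;
  // "gen" and "word_size" are hypothetical:
  //
  //   HeapWord* volatile* top = gen->top_addr();
  //   HeapWord** end = gen->end_addr();
  //   HeapWord* old_top;
  //   HeapWord* new_top;
  //   do {
  //     old_top = *top;
  //     new_top = old_top + word_size;
  //     if (new_top > *end) return NULL;  // allocation region exhausted
  //   } while (Atomic::cmpxchg(new_top, top, old_top) != old_top);
  //   return old_top;                     // start of the newly claimed block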
  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t tlab_used() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num, oop obj, markOop m, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // Returns "true" iff collect() should subsequently be called on this
  // generation.  See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed.  Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If full is true, attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {}

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {}

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {}

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again.  The default is to do nothing.
  virtual void ensure_parsability() {}

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // Both _time_of_last_gc and now are set using a time source
    // that guarantees monotonically non-decreasing values provided
    // the underlying platform provides such a source.  So we still
    // have to guard against non-monotonicity.
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        log_warning(gc)("time warp: " JLONG_FORMAT " to " JLONG_FORMAT, _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now) {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection.  This method
  // updates those statistics.  current_generation is the generation
  // that was most recently collected.  This allows the generation to
  // decide what statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data if
  // the collection of the young generation has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(Generation* current_generation, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }
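  // Illustrative sketch (not part of the original header): a full mark-sweep
  // compaction drives the phase hooks above in order, across the generations.
  // This is a simplified sketch of such a driver loop; "gens", "n", and "cp"
  // are hypothetical:
  //
  //   // Phase 2: compute forwarding addresses for live objects.
  //   for (int i = 0; i < n; i++) gens[i]->prepare_for_compaction(&cp);
  //   // Phase 3: update all pointers to refer to the new locations.
  //   for (int i = 0; i < n; i++) gens[i]->adjust_pointers();
  //   // Phase 4: slide the live objects to their new locations.
  //   for (int i = 0; i < n; i++) gens[i]->compact();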
  // Support for CMS's rescan.  In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors.  A NULL value indicates to the client that
  // no data recording is expected by the provider.  The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }
  virtual void sample_eden_chunk() {}

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {}

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}

  // This function is "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->apply" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions call the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)          \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                   \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}
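  // Illustrative sketch (not part of the original header): the typical
  // "saved marks" protocol pairs save_marks() with repeated calls to the
  // since-save-marks iterator until the generation reports quiescence.
  // "gen" and "cl" are hypothetical:
  //
  //   gen->save_marks();  // note the current allocation point
  //   ... // promote objects into "gen"; each promotion allocates past the mark
  //   while (!gen->no_allocs_since_save_marks()) {
  //     // Scans only objects allocated since the last mark, then re-marks,
  //     // so newly promoted objects are themselves scanned transitively.
  //     gen->oop_since_save_marks_iterate_v(cl);
  //   }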
  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {}

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(ExtendedOopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each.  An object is safe if its references point to other objects in
  // the heap.  This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations.  Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl, uint n_threads) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "object" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;
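  // Illustrative sketch (not part of the original header): the block
  // abstraction supports a linear walk of a generation's used region,
  // visiting each chunk exactly once.  "gen" and "visit" are hypothetical:
  //
  //   MemRegion mr = gen->used_region();
  //   HeapWord* p = mr.start();
  //   while (p < mr.end()) {
  //     size_t sz = gen->block_size(p);  // p is the start of a chunk
  //     if (gen->block_is_obj(p)) {
  //       visit(oop(p));                 // chunk holds an object
  //     }
  //     p += sz;                         // next chunk starts at p + sz
  //   }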
  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;

  void print_heap_change(size_t prev_used) const;

  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify() = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }

  GCMemoryManager* gc_manager() const {
    assert(_gc_manager != NULL, "not initialized yet");
    return _gc_manager;
  }

  void set_gc_manager(GCMemoryManager* gc_manager) {
    _gc_manager = gc_manager;
  }

};

#endif // SHARE_VM_GC_SHARED_GENERATION_HPP