/*
 * Copyright (c) 1997, 2012, Oracle and/or its affiliates. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#ifndef SHARE_VM_MEMORY_GENERATION_HPP
#define SHARE_VM_MEMORY_GENERATION_HPP

#include "gc_implementation/shared/collectorCounters.hpp"
#include "memory/allocation.hpp"
#include "memory/memRegion.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/universe.hpp"
#include "memory/watermark.hpp"
#include "runtime/mutex.hpp"
#include "runtime/perfData.hpp"
#include "runtime/virtualspace.hpp"

// A Generation models a heap area for similarly-aged objects.
// It will contain one or more spaces holding the actual objects.
//
// The Generation class hierarchy:
//
// Generation                        - abstract base class
// - DefNewGeneration                - allocation area (copy collected)
//   - ParNewGeneration              - a DefNewGeneration that is collected by
//                                     several threads
// - CardGeneration                  - abstract class adding offset array behavior
//   - OneContigSpaceCardGeneration  - abstract class holding a single
//                                     contiguous space with card marking
//     - TenuredGeneration           - tenured (old object) space (markSweepCompact)
//     - CompactingPermGenGen        - reflective object area (klasses, methods, symbols, ...)
//   - ConcurrentMarkSweepGeneration - Mostly Concurrent Mark Sweep Generation
//                                     (Detlefs-Printezis refinement of
//                                     Boehm-Demers-Shenker)
//
// The system configurations currently allowed are:
//
//   DefNewGeneration + TenuredGeneration + PermGeneration
//   DefNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//
//   ParNewGeneration + TenuredGeneration + PermGeneration
//   ParNewGeneration + ConcurrentMarkSweepGeneration + ConcurrentMarkSweepPermGen
//

class DefNewGeneration;
class GenerationSpec;
class CompactibleSpace;
class ContiguousSpace;
class CompactPoint;
class OopsInGenClosure;
class OopClosure;
class ScanClosure;
class FastScanClosure;
class GenCollectedHeap;
class GenRemSet;
class GCStats;

// A "ScratchBlock" represents a block of memory in one generation usable by
// another.  It represents "num_words" free words, starting at and including
// the address of "this".
struct ScratchBlock {
  ScratchBlock* next;
  size_t num_words;
  HeapWord scratch_space[1];  // Actually, of size "num_words-2" (assuming
                              // first two fields are word-sized.)
};
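
// Illustrative sketch (not part of the interface): a free run of
// "num_words" words at address "p" could be threaded onto a scratch
// list roughly as follows.
//
//   ScratchBlock* sb = (ScratchBlock*)p;
//   sb->num_words = num_words;
//   sb->next = list;
//   list = sb;   // sb->scratch_space supplies the remaining num_words-2 words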


class Generation: public CHeapObj<mtGC> {
  friend class VMStructs;
 private:
  jlong _time_of_last_gc;       // time when last gc on this generation happened (ms)
  MemRegion _prev_used_region;  // for collectors that want to "remember" a value for
                                // used region at some specific point during collection.

 protected:
  // Minimum and maximum addresses for memory reserved (not necessarily
  // committed) for generation.
  // Used by card marking code.  Must not overlap with address ranges of
  // other generations.
  MemRegion _reserved;

  // Memory area reserved for generation
  VirtualSpace _virtual_space;

  // Level in the generation hierarchy.
  int _level;

  // ("Weak") Reference processing support
  ReferenceProcessor* _ref_processor;

  // Performance Counters
  CollectorCounters* _gc_counters;

  // Statistics for garbage collection
  GCStats* _gc_stats;

  // Returns the next generation in the configuration, or else NULL if this
  // is the highest generation.
  Generation* next_gen() const;

  // Initialize the generation.
  Generation(ReservedSpace rs, size_t initial_byte_size, int level);

  // Apply "cl->do_oop" to (the address of) (exactly) all the ref fields in
  // "sp" that point into younger generations.
  // The iteration is only over objects allocated at the start of the
  // iteration; objects allocated as a result of applying the closure are
  // not included.
  void younger_refs_in_space_iterate(Space* sp, OopsInGenClosure* cl);

 public:
  // The set of possible generation kinds.
  enum Name {
    ASParNew,
    ASConcurrentMarkSweep,
    DefNew,
    ParNew,
    MarkSweepCompact,
    ConcurrentMarkSweep,
    Other
  };

  enum SomePublicConstants {
    // Generations are GenGrain-aligned and have sizes that are multiples of
    // GenGrain.
    // Note: on ARM we add 1 bit for card_table_base to be properly aligned
    // (we expect its low byte to be zero - see implementation of post_barrier)
    LogOfGenGrain = 16 ARM_ONLY(+1),
    GenGrain = 1 << LogOfGenGrain
  };

  // allocate and initialize ("weak") refs processing support
  virtual void ref_processor_init();
  void set_ref_processor(ReferenceProcessor* rp) {
    assert(_ref_processor == NULL, "clobbering existing _ref_processor");
    _ref_processor = rp;
  }

  virtual Generation::Name kind() { return Generation::Other; }
  GenerationSpec* spec();

  // This properly belongs in the collector, but for now this
  // will do.
  virtual bool refs_discovery_is_atomic() const { return true; }
  virtual bool refs_discovery_is_mt()     const { return false; }

  // Space enquiries (results in bytes)
  virtual size_t capacity() const = 0;  // The maximum number of object bytes the
                                        // generation can currently hold.
  virtual size_t used() const = 0;      // The number of used bytes in the gen.
  virtual size_t free() const = 0;      // The number of free bytes in the gen.

  // Support for java.lang.Runtime.maxMemory(); see CollectedHeap.
  // Returns the total number of bytes available in a generation
  // for the allocation of objects.
  virtual size_t max_capacity() const;
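
  // Illustrative invariant (typical for simple contiguous generations;
  // not guaranteed by this interface): at a safepoint,
  //   used() + free() == capacity()  and  capacity() <= max_capacity()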

  // If this is a young generation, the maximum number of bytes that can be
  // allocated in this generation before a GC is triggered.
  virtual size_t capacity_before_gc() const { return 0; }

  // The largest number of contiguous free bytes in the generation,
  // including expansion.  (Assumes called at a safepoint.)
  virtual size_t contiguous_available() const = 0;
  // The largest number of contiguous free bytes in this or any higher generation.
  virtual size_t max_contiguous_available() const;

  // Returns true if promotions of the specified amount are
  // likely to succeed without a promotion failure.
  // Promotion of the full amount is not guaranteed but
  // might be attempted in the worst case.
  virtual bool promotion_attempt_is_safe(size_t max_promotion_in_bytes) const;

  // For a non-young generation, this interface can be used to inform a
  // generation that a promotion attempt into that generation failed.
  // Typically used to enable diagnostic output for post-mortem analysis,
  // but other uses of the interface are not ruled out.
  virtual void promotion_failure_occurred() { /* does nothing */ }

  // Return an estimate of the maximum allocation that could be performed
  // in the generation without triggering any collection or expansion
  // activity.  It is "unsafe" because no locks are taken; the result
  // should be treated as an approximation, not a guarantee, for use in
  // heuristic resizing decisions.
  virtual size_t unsafe_max_alloc_nogc() const = 0;

  // Returns true if this generation cannot be expanded further
  // without a GC.  Override as appropriate.
  virtual bool is_maximal_no_gc() const {
    return _virtual_space.uncommitted_size() == 0;
  }

  MemRegion reserved() const { return _reserved; }

  // Returns a region guaranteed to contain all the objects in the
  // generation.
  virtual MemRegion used_region() const { return _reserved; }

  MemRegion prev_used_region() const { return _prev_used_region; }
  virtual void save_used_region()    { _prev_used_region = used_region(); }

  // Returns "TRUE" iff "p" points into the committed areas in the generation.
  // For some kinds of generations, this may be an expensive operation.
  // To avoid performance problems stemming from its inadvertent use in
  // product jvm's, we restrict its use to assertion checking or
  // verification only.
  virtual bool is_in(const void* p) const;

  /* Returns "TRUE" iff "p" points into the reserved area of the generation. */
  bool is_in_reserved(const void* p) const {
    return _reserved.contains(p);
  }

  // Check that the generation kind is DefNewGeneration or a subclass of
  // DefNewGeneration and return a DefNewGeneration*
  DefNewGeneration* as_DefNewGeneration();

  // If some space in the generation contains the given "addr", return a
  // pointer to that space, else return "NULL".
  virtual Space* space_containing(const void* addr) const;

  // Iteration - do not use for time critical operations
  virtual void space_iterate(SpaceClosure* blk, bool usedOnly = false) = 0;

  // Returns the first space, if any, in the generation that can participate
  // in compaction, or else "NULL".
  virtual CompactibleSpace* first_compaction_space() const = 0;
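
  // Illustrative note on the overflow limit used by should_allocate()
  // below: on an LP64 VM with 8-byte heap words,
  //   (size_t)1 << (BitsPerSize_t - LogHeapWordSize) == (size_t)1 << (64 - 3)
  // words, i.e. any request whose size in *bytes* would overflow a size_t
  // is rejected.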

  // Returns "true" iff this generation should be used to allocate an
  // object of the given size.  Young generations might
  // wish to exclude very large objects, for example, since, if allocated
  // often, they would greatly increase the frequency of young-gen
  // collection.
  virtual bool should_allocate(size_t word_size, bool is_tlab) {
    bool result = false;
    size_t overflow_limit = (size_t)1 << (BitsPerSize_t - LogHeapWordSize);
    if (!is_tlab || supports_tlab_allocation()) {
      result = (word_size > 0) && (word_size < overflow_limit);
    }
    return result;
  }

  // Allocate and return a block of the requested size, or return "NULL".
  // Assumes the caller has done any necessary locking.
  virtual HeapWord* allocate(size_t word_size, bool is_tlab) = 0;

  // Like "allocate", but performs any necessary locking internally.
  virtual HeapWord* par_allocate(size_t word_size, bool is_tlab) = 0;

  // A 'younger' gen has reached an allocation limit, and uses this to notify
  // the next older gen.  The return value is a new limit, or NULL if none.
  // The caller must do the necessary locking.
  virtual HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
                                             size_t word_size) {
    return NULL;
  }

  // Some generations may offer a region for shared, contiguous allocation,
  // via inlined code (by exporting the address of the top and end fields
  // defining the extent of the contiguous allocation region.)

  // This function returns "true" iff the heap supports this kind of
  // allocation.  (More precisely, this means the style of allocation that
  // increments "*top_addr()" with a CAS.)  (Default is "no".)
  // A generation that supports this allocation style must use lock-free
  // allocation for *all* allocation, since there are times when lock free
  // allocation will be concurrent with plain "allocate" calls.
  virtual bool supports_inline_contig_alloc() const { return false; }

  // These functions return the addresses of the fields that define the
  // boundaries of the contiguous allocation area.  (These fields should be
  // physically near to one another.)
  virtual HeapWord** top_addr() const { return NULL; }
  virtual HeapWord** end_addr() const { return NULL; }

  // Thread-local allocation buffers
  virtual bool supports_tlab_allocation() const { return false; }
  virtual size_t tlab_capacity() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }
  virtual size_t unsafe_max_tlab_alloc() const {
    guarantee(false, "Generation doesn't support thread local allocation buffers");
    return 0;
  }

  // "obj" is the address of an object in a younger generation.  Allocate space
  // for "obj" in the current (or some higher) generation, and copy "obj" into
  // the newly allocated space, if possible, returning the result (or NULL if
  // the allocation failed).
  //
  // The "obj_size" argument is just obj->size(), passed along so the caller can
  // avoid repeating the virtual call to retrieve it.
  virtual oop promote(oop obj, size_t obj_size);
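
  // Illustrative minimal sketch of a promote() body (real implementations
  // add promotion-failure handling and GC bookkeeping):
  //
  //   HeapWord* mem = allocate(obj_size, false);
  //   if (mem == NULL) return NULL;   // promotion failed
  //   Copy::aligned_disjoint_words((HeapWord*)obj, mem, obj_size);
  //   return oop(mem);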

  // Thread "thread_num" (0 <= thread_num < ParallelGCThreads) wants to promote
  // object "obj", whose original mark word was "m", and whose size is
  // "word_sz".  If possible, allocate space for "obj", copy obj into it
  // (taking care to copy "m" into the mark word when done, since the mark
  // word of "obj" may have been overwritten with a forwarding pointer, and
  // also taking care to copy the klass pointer *last*).  Returns the new
  // object if successful, or else NULL.
  virtual oop par_promote(int thread_num,
                          oop obj, markOop m, size_t word_sz);

  // Undo, if possible, the most recent par_promote_alloc allocation by
  // "thread_num" ("obj", of "word_sz").
  virtual void par_promote_alloc_undo(int thread_num,
                                      HeapWord* obj, size_t word_sz);

  // Informs the current generation that all par_promote_alloc's in the
  // collection have been completed; any supporting data structures can be
  // reset.  Default is to do nothing.
  virtual void par_promote_alloc_done(int thread_num) {}

  // Informs the current generation that all oop_since_save_marks_iterates
  // performed by "thread_num" in the current collection, if any, have been
  // completed; any supporting data structures can be reset.  Default is to
  // do nothing.
  virtual void par_oop_since_save_marks_iterate_done(int thread_num) {}

  // This generation will collect all younger generations
  // during a full collection.
  virtual bool full_collects_younger_generations() const { return false; }

  // This generation does in-place marking, meaning that mark words
  // are mutated during the marking phase and presumably reinitialized
  // to a canonical value after the GC.  This is currently used by the
  // biased locking implementation to determine whether additional
  // work is required during the GC prologue and epilogue.
  virtual bool performs_in_place_marking() const { return true; }

  // Returns "true" iff collect() should subsequently be called on this
  // generation.  See comment below.
  // This is a generic implementation which can be overridden.
  //
  // Note: in the current (1.4) implementation, when genCollectedHeap's
  // incremental_collection_will_fail flag is set, all allocations are
  // slow path (the only fast-path place to allocate is DefNew, which
  // will be full if the flag is set).
  // Thus, older generations which collect younger generations should
  // test this flag and collect if it is set.
  virtual bool should_collect(bool   full,
                              size_t word_size,
                              bool   is_tlab) {
    return (full || should_allocate(word_size, is_tlab));
  }

  // Returns true if the collection is likely to be safely
  // completed.  Even if this method returns true, a collection
  // may not be guaranteed to succeed, and the system should be
  // able to safely unwind and recover from that failure, albeit
  // at some additional cost.
  virtual bool collection_attempt_is_safe() {
    guarantee(false, "Are you sure you want to call this method?");
    return true;
  }

  // Perform a garbage collection.
  // If full is true, attempt a full garbage collection of this generation.
  // Otherwise, attempt to (at least) free enough space to support an
  // allocation of the given "word_size".
  virtual void collect(bool   full,
                       bool   clear_all_soft_refs,
                       size_t word_size,
                       bool   is_tlab) = 0;

  // Perform a heap collection, attempting to create (at least) enough
  // space to support an allocation of the given "word_size".  If
  // successful, perform the allocation and return the resulting
  // "oop" (initializing the allocated block).  If the allocation is
  // still unsuccessful, return "NULL".
  virtual HeapWord* expand_and_allocate(size_t word_size,
                                        bool is_tlab,
                                        bool parallel = false) = 0;
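
  // Illustrative caller pattern (simplified; the real slow path lives in
  // the collector policy code):
  //
  //   HeapWord* result = gen->allocate(word_size, is_tlab);
  //   if (result == NULL && gen->should_collect(false, word_size, is_tlab)) {
  //     gen->collect(false, false, word_size, is_tlab);
  //     result = gen->expand_and_allocate(word_size, is_tlab);
  //   }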

  // Some generations may require some cleanup or preparation actions before
  // allowing a collection.  The default is to do nothing.
  virtual void gc_prologue(bool full) {};

  // Some generations may require some cleanup actions after a collection.
  // The default is to do nothing.
  virtual void gc_epilogue(bool full) {};

  // Save the high water marks for the used space in a generation.
  virtual void record_spaces_top() {};

  // Some generations may need to be "fixed-up" after some allocation
  // activity to make them parsable again.  The default is to do nothing.
  virtual void ensure_parsability() {};

  // Time (in ms) when we were last collected or now if a collection is
  // in progress.
  virtual jlong time_of_last_gc(jlong now) {
    // Both _time_of_last_gc and now are set using a time source
    // that guarantees monotonically non-decreasing values provided
    // the underlying platform provides such a source.  So we still
    // have to guard against non-monotonicity.
    NOT_PRODUCT(
      if (now < _time_of_last_gc) {
        warning("time warp: "INT64_FORMAT" to "INT64_FORMAT, _time_of_last_gc, now);
      }
    )
    return _time_of_last_gc;
  }

  virtual void update_time_of_last_gc(jlong now)  {
    _time_of_last_gc = now;
  }

  // Generations may keep statistics about collection.  This
  // method updates those statistics.  current_level is
  // the level of the collection that has most recently
  // occurred.  This allows the generation to decide what
  // statistics are valid to collect.  For example, the
  // generation can decide to gather the amount of promoted data
  // if the collection of the younger generations has completed.
  GCStats* gc_stats() const { return _gc_stats; }
  virtual void update_gc_stats(int current_level, bool full) {}

  // Mark sweep support phase2
  virtual void prepare_for_compaction(CompactPoint* cp);
  // Mark sweep support phase3
  virtual void pre_adjust_pointers() { ShouldNotReachHere(); }
  virtual void adjust_pointers();
  // Mark sweep support phase4
  virtual void compact();
  virtual void post_compact() { ShouldNotReachHere(); }

  // Support for CMS's rescan.  In this general form we return a pointer
  // to an abstract object that can be used, based on specific previously
  // decided protocols, to exchange information between generations,
  // information that may be useful for speeding up certain types of
  // garbage collectors.  A NULL value indicates to the client that
  // no data recording is expected by the provider.  The data-recorder is
  // expected to be GC worker thread-local, with the worker index
  // indicated by "thr_num".
  virtual void* get_data_recorder(int thr_num) { return NULL; }

  // Some generations may require some cleanup actions before allowing
  // a verification.
  virtual void prepare_for_verify() {};

  // Accessing "marks".

  // This function gives a generation a chance to note a point between
  // collections.  For example, a contiguous generation might note the
  // beginning allocation point post-collection, which might allow some later
  // operations to be optimized.
  virtual void save_marks() {}

  // This function allows generations to initialize any "saved marks".  That
  // is, it should only be called when the generation is empty.
  virtual void reset_saved_marks() {}
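
  // Illustrative use of the mark protocol during a young collection
  // (simplified):
  //
  //   old_gen->save_marks();                         // note current "top"
  //   ... promote survivors into old_gen ...
  //   old_gen->oop_since_save_marks_iterate_v(cl);   // scan only new arrivals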

  // Returns "true" iff no allocations have occurred in the
  // generation since the last call to "save_marks".
  virtual bool no_allocs_since_save_marks() = 0;

  // Apply "cl->apply" to (the addresses of) all reference fields in objects
  // allocated in the current generation since the last call to "save_marks".
  // If more objects are allocated in this generation as a result of applying
  // the closure, iterates over reference fields in those objects as well.
  // Calls "save_marks" at the end of the iteration.
  // General signature...
  virtual void oop_since_save_marks_iterate_v(OopsInGenClosure* cl) = 0;
  // ...and specializations for de-virtualization.  (The general
  // implementation of the _nv versions calls the virtual version.
  // Note that the _nv suffix is not really semantically necessary,
  // but it avoids some not-so-useful warnings on Solaris.)
#define Generation_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)          \
  virtual void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl) { \
    oop_since_save_marks_iterate_v((OopsInGenClosure*)cl);                   \
  }
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(Generation_SINCE_SAVE_MARKS_DECL)

#undef Generation_SINCE_SAVE_MARKS_DECL

  // The "requestor" generation is performing some garbage collection
  // action for which it would be useful to have scratch space.  If
  // the target is not the requestor, no gc actions will be required
  // of the target.  The requestor promises to allocate no more than
  // "max_alloc_words" in the target generation (via promotion say,
  // if the requestor is a young generation and the target is older).
  // If the target generation can provide any scratch space, it adds
  // it to "list", leaving "list" pointing to the head of the
  // augmented list.  The default is to offer no space.
  virtual void contribute_scratch(ScratchBlock*& list, Generation* requestor,
                                  size_t max_alloc_words) {}

  // Give each generation an opportunity to do clean up for any
  // contributed scratch.
  virtual void reset_scratch() {};

  // When an older generation has been collected, and perhaps resized,
  // this method will be invoked on all younger generations (from older to
  // younger), allowing them to resize themselves as appropriate.
  virtual void compute_new_size() = 0;

  // Printing
  virtual const char* name() const = 0;
  virtual const char* short_name() const = 0;

  int level() const { return _level; }

  // Attributes

  // True iff the given generation may only be the youngest generation.
  virtual bool must_be_youngest() const = 0;
  // True iff the given generation may only be the oldest generation.
  virtual bool must_be_oldest() const = 0;

  // Reference Processing accessor
  ReferenceProcessor* const ref_processor() { return _ref_processor; }

  // Iteration.

  // Iterate over all the ref-containing fields of all objects in the
  // generation, calling "cl.do_oop" on each.
  virtual void oop_iterate(OopClosure* cl);

  // Same as above, restricted to the intersection of a memory region and
  // the generation.
  virtual void oop_iterate(MemRegion mr, OopClosure* cl);

  // Iterate over all objects in the generation, calling "cl.do_object" on
  // each.
  virtual void object_iterate(ObjectClosure* cl);
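
  // Illustrative use (ObjectClosure is the real closure type; the counting
  // closure is a hypothetical example):
  //
  //   class CountingClosure: public ObjectClosure {
  //    public:
  //     int count;
  //     CountingClosure() : count(0) {}
  //     void do_object(oop obj) { count++; }
  //   };
  //   CountingClosure cc;
  //   gen->object_iterate(&cc);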

  // Iterate over all safe objects in the generation, calling "cl.do_object" on
  // each.  An object is safe if its references point to other objects in
  // the heap.  This defaults to object_iterate() unless overridden.
  virtual void safe_object_iterate(ObjectClosure* cl);

  // Iterate over all objects allocated in the generation since the last
  // collection, calling "cl.do_object" on each.  The generation must have
  // been initialized properly to support this function, or else this call
  // will fail.
  virtual void object_iterate_since_last_GC(ObjectClosure* cl) = 0;

  // Apply "cl->do_oop" to (the address of) all and only all the ref fields
  // in the current generation that contain pointers to objects in younger
  // generations.  Objects allocated since the last "save_marks" call are
  // excluded.
  virtual void younger_refs_iterate(OopsInGenClosure* cl) = 0;

  // Inform a generation that it no longer contains references to objects
  // in any younger generation.  [e.g. Because younger gens are empty,
  // clear the card table.]
  virtual void clear_remembered_set() { }

  // Inform a generation that some of its objects have moved.  [e.g. The
  // generation's spaces were compacted, invalidating the card table.]
  virtual void invalidate_remembered_set() { }

  // Block abstraction.

  // Returns the address of the start of the "block" that contains the
  // address "addr".  We say "blocks" instead of "objects" since some heaps
  // may not pack objects densely; a chunk may either be an object or a
  // non-object.
  virtual HeapWord* block_start(const void* addr) const;

  // Requires "addr" to be the start of a chunk, and returns its size.
  // "addr + size" is required to be the start of a new chunk, or the end
  // of the active area of the heap.
  virtual size_t block_size(const HeapWord* addr) const;

  // Requires "addr" to be the start of a block, and returns "TRUE" iff
  // the block is an object.
  virtual bool block_is_obj(const HeapWord* addr) const;


  // PrintGC, PrintGCDetails support
  void print_heap_change(size_t prev_used) const;

  // PrintHeapAtGC support
  virtual void print() const;
  virtual void print_on(outputStream* st) const;

  virtual void verify() = 0;

  struct StatRecord {
    int invocations;
    elapsedTimer accumulated_time;
    StatRecord() :
      invocations(0),
      accumulated_time(elapsedTimer()) {}
  };
 private:
  StatRecord _stat_record;
 public:
  StatRecord* stat_record() { return &_stat_record; }

  virtual void print_summary_info();
  virtual void print_summary_info_on(outputStream* st);

  // Performance Counter support
  virtual void update_counters() = 0;
  virtual CollectorCounters* counters() { return _gc_counters; }
};

// Class CardGeneration is a generation that is covered by a card table,
// and uses a card-size block-offset array to implement block_start.

// class BlockOffsetArray;
// class BlockOffsetArrayContigSpace;
class BlockOffsetSharedArray;

class CardGeneration: public Generation {
  friend class VMStructs;
 protected:
  // This is shared with other generations.
  GenRemSet* _rs;
  // This is local to this generation.
  BlockOffsetSharedArray* _bts;

  CardGeneration(ReservedSpace rs, size_t initial_byte_size, int level,
                 GenRemSet* remset);

 public:

  // Attempt to expand the generation by "bytes".  Expand by a minimum of
  // "expand_bytes".  Return true if some amount (not necessarily the full
  // "bytes") was done.
  virtual bool expand(size_t bytes, size_t expand_bytes);
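  // Illustrative request (sizes are made up): expand(512*1024, 64*1024)
  // asks to commit roughly 512K more, settles for an amount on the order
  // of 64K, and returns false only if no expansion was possible.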

  virtual void clear_remembered_set();

  virtual void invalidate_remembered_set();

  virtual void prepare_for_verify();

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes) = 0;
  // Grow generation to reserved size.
  virtual bool grow_to_reserved() = 0;
};

// OneContigSpaceCardGeneration models a heap of old objects contained in a single
// contiguous space.
//
// Garbage collection is performed using mark-compact.

class OneContigSpaceCardGeneration: public CardGeneration {
  friend class VMStructs;
  // Abstractly, this is a subtype that gets access to protected fields.
  friend class CompactingPermGen;
  friend class VM_PopulateDumpSharedSpace;

 protected:
  size_t            _min_heap_delta_bytes;  // Minimum amount to expand.
  ContiguousSpace*  _the_space;             // actual space holding objects
  WaterMark         _last_gc;               // watermark between objects allocated before
                                            // and after last GC.

  // Grow generation with specified size (returns false if unable to grow)
  virtual bool grow_by(size_t bytes);
  // Grow generation to reserved size.
  virtual bool grow_to_reserved();
  // Shrink generation with specified size (returns false if unable to shrink)
  void shrink_by(size_t bytes);

  // Allocation failure
  virtual bool expand(size_t bytes, size_t expand_bytes);
  void shrink(size_t bytes);

  // Accessing spaces
  ContiguousSpace* the_space() const { return _the_space; }

 public:
  OneContigSpaceCardGeneration(ReservedSpace rs, size_t initial_byte_size,
                               size_t min_heap_delta_bytes,
                               int level, GenRemSet* remset,
                               ContiguousSpace* space) :
    CardGeneration(rs, initial_byte_size, level, remset),
    _the_space(space), _min_heap_delta_bytes(min_heap_delta_bytes)
  {}

  inline bool is_in(const void* p) const;

  // Space enquiries
  size_t capacity() const;
  size_t used() const;
  size_t free() const;

  MemRegion used_region() const;

  size_t unsafe_max_alloc_nogc() const;
  size_t contiguous_available() const;

  // Iteration
  void object_iterate(ObjectClosure* blk);
  void space_iterate(SpaceClosure* blk, bool usedOnly = false);
  void object_iterate_since_last_GC(ObjectClosure* cl);

  void younger_refs_iterate(OopsInGenClosure* blk);

  inline CompactibleSpace* first_compaction_space() const;

  virtual inline HeapWord* allocate(size_t word_size, bool is_tlab);
  virtual inline HeapWord* par_allocate(size_t word_size, bool is_tlab);

  // Accessing marks
  inline WaterMark top_mark();
  inline WaterMark bottom_mark();

#define OneContig_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix)  \
  void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
  OneContig_SINCE_SAVE_MARKS_DECL(OopsInGenClosure,_v)
  SPECIALIZED_SINCE_SAVE_MARKS_CLOSURES(OneContig_SINCE_SAVE_MARKS_DECL)

  void save_marks();
  void reset_saved_marks();
  bool no_allocs_since_save_marks();

  inline size_t block_size(const HeapWord* addr) const;

  inline bool block_is_obj(const HeapWord* addr) const;

  virtual void collect(bool full,
                       bool clear_all_soft_refs,
                       size_t size,
                       bool is_tlab);
  HeapWord* expand_and_allocate(size_t size,
                                bool is_tlab,
                                bool parallel = false);

  virtual void prepare_for_verify();

  virtual void gc_epilogue(bool full);

  virtual void record_spaces_top();

  virtual void verify();
  virtual void print_on(outputStream* st) const;
};

#endif // SHARE_VM_MEMORY_GENERATION_HPP