// ============================================================================
// Original version of the excerpt (G1CollectedHeap header):
// ============================================================================

// Forward declarations
class HeapRegion;
class HRRSCleanupTask;
class GenerationSpec;
class OopsInHeapRegionClosure;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class G1KlassScanClosure;
class G1ParScanThreadState;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectionSet;
class G1CollectorPolicy;
class G1RemSet;
class HeapRegionRemSetIterator;
class G1ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class ConcurrentGCTimer;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class G1OldTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;
class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1HeapVerifier;

typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

// The G1 STW is-alive closure.
// An instance is embedded into the G1CH and used as the (optional)
// _is_alive_non_header closure in the STW reference processor. It is
// also used extensively during reference processing in STW evacuation
// pauses.
class G1STWIsAliveClosure: public BoolObjectClosure {

// ...

  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
  // (b) cause == _g1_humongous_allocation,
  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
  // (e) cause == _update_allocation_context_stats_inc, or
  // (f) cause == _wb_conc_mark.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);
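  // A sketch of the predicate enumerated above, as it might be defined in
  // the corresponding .cpp file (an editorial illustration consistent with
  // clauses (a)-(f), not a verbatim copy of the implementation):
  //
  bool G1CollectedHeap::should_do_concurrent_full_gc(GCCause::Cause cause) {
    switch (cause) {
      case GCCause::_gc_locker:                           return GCLockerInvokesConcurrent;   // (a)
      case GCCause::_g1_humongous_allocation:             return true;                        // (b)
      case GCCause::_java_lang_system_gc:                 return ExplicitGCInvokesConcurrent; // (c)
      case GCCause::_dcmd_gc_run:                         return ExplicitGCInvokesConcurrent; // (d)
      case GCCause::_update_allocation_context_stats_inc: return true;                        // (e)
      case GCCause::_wb_conc_mark:                        return true;                        // (f)
      default:                                            return false;
    }
  }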
  // Indicates whether we are in young or mixed GC mode.
  G1CollectorState _collector_state;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have started.
  volatile uint _old_marking_cycles_started;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have completed.
  volatile uint _old_marking_cycles_completed;

  bool _heap_summary_sent;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions allocated by this method
  // will be found dead by the marking cycle).
  void allocate_dummy_regions() PRODUCT_RETURN;

  // Clear RSets after a compaction. This also resets the GC time stamps.
  void clear_rsets_post_compaction();

  // If the HR printer is active, dump the state of the regions in the
  // heap after a compaction.
  void print_hrm_post_compaction();

  // Create a memory mapper for auxiliary data structures of the given size
  // and translation factor.
  static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
                                                         size_t size,
                                                         size_t translation_factor);

// ...

  // This is called at the end of either a concurrent cycle or a Full GC
  // to update the number of old marking cycles completed. The two can
  // nest: we start a concurrent cycle, a Full GC happens half-way
  // through it and ends first, and the concurrent cycle then notices
  // that a Full GC happened and ends too. The concurrent parameter
  // allows slightly tighter consistency checking in the method. If
  // concurrent is false, the caller is the inner caller in the nesting
  // (i.e., the Full GC); if concurrent is true, the caller is the outer
  // caller (i.e., the concurrent cycle). Further nesting is not
  // currently supported. The end of this call also notifies the
  // FullGCCount_lock in case a Java thread is waiting for a full GC to
  // happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_old_marking_cycles_completed(bool concurrent);

  uint old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }

  void register_concurrent_cycle_start(const Ticks& start_time);
  void register_concurrent_cycle_end();
  void trace_heap_after_concurrent_cycle();

  G1HRPrinter* hr_printer() { return &_hr_printer; }

  // Allocates a new heap region instance.
  HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);

  // Allocate the highest free region in the reserved heap. This will
  // commit regions as necessary.
  HeapRegion* alloc_highest_free_region();

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is
  // usually a local list which will be appended to the master free
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up; the assumption is that this will be done later. The locked
  // parameter indicates whether the caller has already taken care of
  // proper synchronization, which may allow some optimizations.
  void free_region(HeapRegion* hr,
                   FreeRegionList* free_list,
                   bool par,
// ...

  // * Temporarily set STW ref processor discovery as single threaded.
  // * Temporarily clear the STW ref processor's _is_alive_non_header
  //   field.
  // * Finally enable discovery by the STW ref processor.
  //
  // The STW ref processor is used to record any discovered references
  // during the full GC.
  //
  // At the end of a full GC we:
  // * Enqueue any reference objects discovered by the STW ref processor
  //   that have non-live referents. This has the side-effect of making
  //   the STW ref processor inactive by disabling discovery.
  // * Verify that the CM ref processor is still inactive and that no
  //   references have been placed on its discovered lists (also checked
  //   as a precondition during initial marking).

  // The (STW) reference processor...
  ReferenceProcessor* _ref_processor_stw;
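  // A sketch of the full-GC setup steps listed above (an editorial
  // illustration, not the actual collection code; it assumes the scoped
  // ReferenceProcessor*Mutator helpers declared in referenceProcessor.hpp,
  // and the helper name itself is hypothetical):
  //
  void full_gc_stw_ref_processing_prologue_sketch() {
    // Temporarily set STW ref processor discovery as single threaded.
    ReferenceProcessorMTDiscoveryMutator stw_rp_disc_ser(ref_processor_stw(), false);
    // Temporarily clear the STW ref processor's _is_alive_non_header field.
    ReferenceProcessorIsAliveMutator stw_rp_is_alive_null(ref_processor_stw(), NULL);
    // Finally enable discovery by the STW ref processor; references
    // discovered while it is enabled are recorded for the full GC.
    ref_processor_stw()->enable_discovery();
    // ... the full collection would run here; enqueuing the discovered
    // references afterwards disables discovery again ...
  }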
  STWGCTimer* _gc_timer_stw;
  ConcurrentGCTimer* _gc_timer_cm;

  G1OldTracer* _gc_tracer_cm;
  G1NewTracer* _gc_tracer_stw;

  // During reference object discovery, the _is_alive_non_header
  // closure (if non-null) is applied to the referent object to
  // determine whether the referent is live. If so, the reference
  // object does not need to be 'discovered' and can be treated as a
  // regular oop. This has the benefit of reducing the number of
  // 'discovered' reference objects that need to be processed.
  //
  // Instance of the is_alive closure for embedding into the STW
  // reference processor as the _is_alive_non_header field. Supplying a
  // value for the _is_alive_non_header field is optional but doing so
  // prevents unnecessary additions to the discovered lists during
  // reference discovery.
  G1STWIsAliveClosure _is_alive_closure_stw;

  // The (concurrent marking) reference processor...
  ReferenceProcessor* _ref_processor_cm;
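  // A sketch of the STW is-alive predicate described above (an editorial
  // illustration written from the .cpp file's point of view; it assumes
  // the closure caches a G1CollectedHeap* in a _g1h field):
  //
  bool G1STWIsAliveClosure::do_object_b(oop p) {
    // An object is reachable if it is outside the collection set, or is
    // inside it and has already been forwarded (copied) by evacuation.
    return !_g1h->is_in_cset(p) || p->is_forwarded();
  }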
// ...

  void reset_gc_time_stamps(HeapRegion* hr);

  // Apply the given closure on all cards in the Hot Card Cache, emptying it.
  void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);

  // Apply the given closure on all cards in the Dirty Card Queue Set,
  // emptying it.
  void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);

  // The shared block offset table array.
  G1BlockOffsetTable* bot() const { return _bot; }

  // Reference Processing accessors

  // The STW reference processor...
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }

  // The Concurrent Marking reference processor...
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  ConcurrentGCTimer* gc_timer_cm() const { return _gc_timer_cm; }
  G1OldTracer* gc_tracer_cm() const { return _gc_tracer_cm; }

  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region).
  // But G1CollectedHeap doesn't yet support this.

  virtual bool is_maximal_no_gc() const {
    return _hrm.available() == 0;
  }

  // The current number of regions in the heap.
  uint num_regions() const { return _hrm.length(); }

// ============================================================================
// Updated version of the same excerpt. Relative to the listing above, the
// ConcurrentGCTimer and G1OldTracer forward declarations, the
// _heap_summary_sent field, the register_concurrent_cycle_start(),
// register_concurrent_cycle_end() and trace_heap_after_concurrent_cycle()
// declarations, the _gc_timer_cm and _gc_tracer_cm fields, and the
// gc_timer_cm()/gc_tracer_cm() accessors have been removed.
// ============================================================================

// Forward declarations
class HeapRegion;
class HRRSCleanupTask;
class GenerationSpec;
class OopsInHeapRegionClosure;
class G1ParScanThreadState;
class G1ParScanThreadStateSet;
class G1KlassScanClosure;
class G1ParScanThreadState;
class ObjectClosure;
class SpaceClosure;
class CompactibleSpaceClosure;
class Space;
class G1CollectionSet;
class G1CollectorPolicy;
class G1RemSet;
class HeapRegionRemSetIterator;
class G1ConcurrentMark;
class ConcurrentMarkThread;
class ConcurrentG1Refine;
class GenerationCounters;
class STWGCTimer;
class G1NewTracer;
class EvacuationFailedInfo;
class nmethod;
class Ticks;
class WorkGang;
class G1Allocator;
class G1ArchiveAllocator;
class G1HeapVerifier;

typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;

typedef int RegionIdx_t;   // needs to hold [ 0..max_regions() )
typedef int CardIdx_t;     // needs to hold [ 0..CardsPerRegion )

// The G1 STW is-alive closure.
// An instance is embedded into the G1CH and used as the (optional)
// _is_alive_non_header closure in the STW reference processor. It is
// also used extensively during reference processing in STW evacuation
// pauses.
class G1STWIsAliveClosure: public BoolObjectClosure {

// ...

  // explicitly started if:
  // (a) cause == _gc_locker and +GCLockerInvokesConcurrent,
  // (b) cause == _g1_humongous_allocation,
  // (c) cause == _java_lang_system_gc and +ExplicitGCInvokesConcurrent,
  // (d) cause == _dcmd_gc_run and +ExplicitGCInvokesConcurrent,
  // (e) cause == _update_allocation_context_stats_inc, or
  // (f) cause == _wb_conc_mark.
  bool should_do_concurrent_full_gc(GCCause::Cause cause);

  // Indicates whether we are in young or mixed GC mode.
  G1CollectorState _collector_state;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have started.
  volatile uint _old_marking_cycles_started;

  // Keeps track of how many "old marking cycles" (i.e., Full GCs or
  // concurrent cycles) we have completed.
  volatile uint _old_marking_cycles_completed;

  // This is a non-product method that is helpful for testing. It is
  // called at the end of a GC and artificially expands the heap by
  // allocating a number of dead regions. This way we can induce very
  // frequent marking cycles and stress the cleanup / concurrent
  // cleanup code more (as all the regions allocated by this method
  // will be found dead by the marking cycle).
  void allocate_dummy_regions() PRODUCT_RETURN;

  // Clear RSets after a compaction. This also resets the GC time stamps.
  void clear_rsets_post_compaction();

  // If the HR printer is active, dump the state of the regions in the
  // heap after a compaction.
  void print_hrm_post_compaction();

  // Create a memory mapper for auxiliary data structures of the given size
  // and translation factor.
  static G1RegionToSpaceMapper* create_aux_memory_mapper(const char* description,
                                                         size_t size,
                                                         size_t translation_factor);

// ...

  // This is called at the end of either a concurrent cycle or a Full GC
  // to update the number of old marking cycles completed. The two can
  // nest: we start a concurrent cycle, a Full GC happens half-way
  // through it and ends first, and the concurrent cycle then notices
  // that a Full GC happened and ends too. The concurrent parameter
  // allows slightly tighter consistency checking in the method. If
  // concurrent is false, the caller is the inner caller in the nesting
  // (i.e., the Full GC); if concurrent is true, the caller is the outer
  // caller (i.e., the concurrent cycle). Further nesting is not
  // currently supported. The end of this call also notifies the
  // FullGCCount_lock in case a Java thread is waiting for a full GC to
  // happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_old_marking_cycles_completed(bool concurrent);
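  // A sketch of the notification described above (an editorial
  // illustration; the real definition also checks consistency between the
  // started and completed counters, which is omitted here). It assumes
  // the FullGCCount_lock monitor and the MonitorLockerEx helper from
  // mutexLocker.hpp:
  //
  void G1CollectedHeap::increment_old_marking_cycles_completed(bool concurrent) {
    MonitorLockerEx ml(FullGCCount_lock, Mutex::_no_safepoint_check_flag);
    // Both the inner caller (a Full GC, concurrent == false) and the outer
    // caller (a concurrent cycle, concurrent == true) funnel through here.
    _old_marking_cycles_completed += 1;
    // Wake any Java thread waiting for a full GC to finish, e.g. one that
    // called System.gc() with +ExplicitGCInvokesConcurrent.
    FullGCCount_lock->notify_all();
  }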
  uint old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }

  G1HRPrinter* hr_printer() { return &_hr_printer; }

  // Allocates a new heap region instance.
  HeapRegion* new_heap_region(uint hrs_index, MemRegion mr);

  // Allocate the highest free region in the reserved heap. This will
  // commit regions as necessary.
  HeapRegion* alloc_highest_free_region();

  // Frees a non-humongous region by initializing its contents and
  // adding it to the free list that's passed as a parameter (this is
  // usually a local list which will be appended to the master free
  // list later). The used bytes of freed regions are accumulated in
  // pre_used. If par is true, the region's RSet will not be freed
  // up; the assumption is that this will be done later. The locked
  // parameter indicates whether the caller has already taken care of
  // proper synchronization, which may allow some optimizations.
  void free_region(HeapRegion* hr,
                   FreeRegionList* free_list,
                   bool par,
// ...

  // * Temporarily set STW ref processor discovery as single threaded.
  // * Temporarily clear the STW ref processor's _is_alive_non_header
  //   field.
  // * Finally enable discovery by the STW ref processor.
  //
  // The STW ref processor is used to record any discovered references
  // during the full GC.
  //
  // At the end of a full GC we:
  // * Enqueue any reference objects discovered by the STW ref processor
  //   that have non-live referents. This has the side-effect of making
  //   the STW ref processor inactive by disabling discovery.
  // * Verify that the CM ref processor is still inactive and that no
  //   references have been placed on its discovered lists (also checked
  //   as a precondition during initial marking).

  // The (STW) reference processor...
  ReferenceProcessor* _ref_processor_stw;

  STWGCTimer* _gc_timer_stw;

  G1NewTracer* _gc_tracer_stw;

  // During reference object discovery, the _is_alive_non_header
  // closure (if non-null) is applied to the referent object to
  // determine whether the referent is live. If so, the reference
  // object does not need to be 'discovered' and can be treated as a
  // regular oop. This has the benefit of reducing the number of
  // 'discovered' reference objects that need to be processed.
  //
  // Instance of the is_alive closure for embedding into the STW
  // reference processor as the _is_alive_non_header field. Supplying a
  // value for the _is_alive_non_header field is optional but doing so
  // prevents unnecessary additions to the discovered lists during
  // reference discovery.
  G1STWIsAliveClosure _is_alive_closure_stw;

  // The (concurrent marking) reference processor...
  ReferenceProcessor* _ref_processor_cm;

// ...

  void reset_gc_time_stamps(HeapRegion* hr);

  // Apply the given closure on all cards in the Hot Card Cache, emptying it.
  void iterate_hcc_closure(CardTableEntryClosure* cl, uint worker_i);

  // Apply the given closure on all cards in the Dirty Card Queue Set,
  // emptying it.
  void iterate_dirty_card_closure(CardTableEntryClosure* cl, uint worker_i);
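  // A sketch of a closure that could be passed to iterate_hcc_closure() or
  // iterate_dirty_card_closure() above (CountCardsClosure is a hypothetical
  // name; it assumes CardTableEntryClosure's do_card_ptr(jbyte*, uint)
  // interface from dirtyCardQueue.hpp):
  //
  class CountCardsClosure : public CardTableEntryClosure {
    size_t _cards;
   public:
    CountCardsClosure() : _cards(0) { }
    virtual bool do_card_ptr(jbyte* card_ptr, uint worker_i) {
      _cards++;      // just consume the card; a real closure would refine it
      return true;   // returning true continues the iteration
    }
    size_t cards() const { return _cards; }
  };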
  // The shared block offset table array.
  G1BlockOffsetTable* bot() const { return _bot; }

  // Reference Processing accessors

  // The STW reference processor...
  ReferenceProcessor* ref_processor_stw() const { return _ref_processor_stw; }

  G1NewTracer* gc_tracer_stw() const { return _gc_tracer_stw; }

  // The Concurrent Marking reference processor...
  ReferenceProcessor* ref_processor_cm() const { return _ref_processor_cm; }

  virtual size_t capacity() const;
  virtual size_t used() const;
  // This should be called when we're not holding the heap lock. The
  // result might be a bit inaccurate.
  size_t used_unlocked() const;
  size_t recalculate_used() const;

  // These virtual functions do the actual allocation.
  // Some heaps may offer a contiguous region for shared non-blocking
  // allocation, via inlined code (by exporting the address of the top and
  // end fields defining the extent of the contiguous allocation region).
  // But G1CollectedHeap doesn't yet support this.

  virtual bool is_maximal_no_gc() const {
    return _hrm.available() == 0;
  }

  // The current number of regions in the heap.
  uint num_regions() const { return _hrm.length(); }