55 class G1KlassScanClosure;
56 class G1ScanHeapEvacClosure;
57 class ObjectClosure;
58 class SpaceClosure;
59 class CompactibleSpaceClosure;
60 class Space;
61 class G1CollectorPolicy;
62 class GenRemSet;
63 class G1RemSet;
64 class HeapRegionRemSetIterator;
65 class ConcurrentMark;
66 class ConcurrentMarkThread;
67 class ConcurrentG1Refine;
68 class ConcurrentGCTimer;
69 class GenerationCounters;
70 class STWGCTimer;
71 class G1NewTracer;
72 class G1OldTracer;
73 class EvacuationFailedInfo;
74 class nmethod;
75
76 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
77 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
78
79 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
80 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
81
// Destination categories for objects allocated during GC evacuation.
enum GCAllocPurpose {
  GCAllocForTenured   = 0,  // copy target is the old (tenured) generation
  GCAllocForSurvived  = 1,  // copy target is a survivor region
  GCAllocPurposeCount = 2   // sentinel: number of purposes (for array sizing)
};
87
88 class YoungList : public CHeapObj<mtGC> {
89 private:
90 G1CollectedHeap* _g1h;
91
92 HeapRegion* _head;
93
94 HeapRegion* _survivor_head;
  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The concurrent parameter is a boolean to help us do a bit
  // tighter consistency checking in the method. If concurrent is
  // false, the caller is the inner caller in the nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the FullGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_old_marking_cycles_completed(bool concurrent);

  // Returns the current count maintained by
  // increment_old_marking_cycles_completed() above.
  unsigned int old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }

  // Bracketing notifications for a concurrent marking cycle.
  // NOTE(review): start_time is a jlong timestamp; its units/origin
  // (e.g. os::elapsed_counter()) are not visible here — confirm with callers.
  void register_concurrent_cycle_start(jlong start_time);
  void register_concurrent_cycle_end();
  // Heap tracing hook invoked once a concurrent cycle has finished.
  void trace_heap_after_concurrent_cycle();

  // Young collection type of the current/most recent young GC.
  G1YCType yc_type();

  // Accessor for the heap-region event printer.
  G1HRPrinter* hr_printer() { return &_hr_printer; }

protected:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  virtual void shrink(size_t expand_bytes);
  // Helper used by shrink(); implementation not visible in this header.
  void shrink_helper(size_t expand_bytes);

#if TASKQUEUE_STATS
  // Task-queue statistics reporting; compiled in only when
  // TASKQUEUE_STATS is enabled. Output defaults to the GC log stream.
  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
  void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

|
55 class G1KlassScanClosure;
56 class G1ScanHeapEvacClosure;
57 class ObjectClosure;
58 class SpaceClosure;
59 class CompactibleSpaceClosure;
60 class Space;
61 class G1CollectorPolicy;
62 class GenRemSet;
63 class G1RemSet;
64 class HeapRegionRemSetIterator;
65 class ConcurrentMark;
66 class ConcurrentMarkThread;
67 class ConcurrentG1Refine;
68 class ConcurrentGCTimer;
69 class GenerationCounters;
70 class STWGCTimer;
71 class G1NewTracer;
72 class G1OldTracer;
73 class EvacuationFailedInfo;
74 class nmethod;
75 class Ticks;
76
77 typedef OverflowTaskQueue<StarTask, mtGC> RefToScanQueue;
78 typedef GenericTaskQueueSet<RefToScanQueue, mtGC> RefToScanQueueSet;
79
80 typedef int RegionIdx_t; // needs to hold [ 0..max_regions() )
81 typedef int CardIdx_t; // needs to hold [ 0..CardsPerRegion )
82
// Destination categories for objects allocated during GC evacuation.
enum GCAllocPurpose {
  GCAllocForTenured   = 0,  // copy target is the old (tenured) generation
  GCAllocForSurvived  = 1,  // copy target is a survivor region
  GCAllocPurposeCount = 2   // sentinel: number of purposes (for array sizing)
};
88
89 class YoungList : public CHeapObj<mtGC> {
90 private:
91 G1CollectedHeap* _g1h;
92
93 HeapRegion* _head;
94
95 HeapRegion* _survivor_head;
  // This is called at the end of either a concurrent cycle or a Full
  // GC to update the number of old marking cycles completed. Those two
  // can happen in a nested fashion, i.e., we start a concurrent
  // cycle, a Full GC happens half-way through it which ends first,
  // and then the cycle notices that a Full GC happened and ends
  // too. The concurrent parameter is a boolean to help us do a bit
  // tighter consistency checking in the method. If concurrent is
  // false, the caller is the inner caller in the nesting (i.e., the
  // Full GC). If concurrent is true, the caller is the outer caller
  // in this nesting (i.e., the concurrent cycle). Further nesting is
  // not currently supported. The end of this call also notifies
  // the FullGCCount_lock in case a Java thread is waiting for a full
  // GC to happen (e.g., it called System.gc() with
  // +ExplicitGCInvokesConcurrent).
  void increment_old_marking_cycles_completed(bool concurrent);

  // Returns the current count maintained by
  // increment_old_marking_cycles_completed() above.
  unsigned int old_marking_cycles_completed() {
    return _old_marking_cycles_completed;
  }

  // Bracketing notifications for a concurrent marking cycle.
  // start_time is passed as a Ticks value (forward-declared above);
  // NOTE(review): its clock source is not visible here — confirm with callers.
  void register_concurrent_cycle_start(const Ticks& start_time);
  void register_concurrent_cycle_end();
  // Heap tracing hook invoked once a concurrent cycle has finished.
  void trace_heap_after_concurrent_cycle();

  // Young collection type of the current/most recent young GC.
  G1YCType yc_type();

  // Accessor for the heap-region event printer.
  G1HRPrinter* hr_printer() { return &_hr_printer; }

protected:

  // Shrink the garbage-first heap by at most the given size (in bytes!).
  // (Rounds down to a HeapRegion boundary.)
  virtual void shrink(size_t expand_bytes);
  // Helper used by shrink(); implementation not visible in this header.
  void shrink_helper(size_t expand_bytes);

#if TASKQUEUE_STATS
  // Task-queue statistics reporting; compiled in only when
  // TASKQUEUE_STATS is enabled. Output defaults to the GC log stream.
  static void print_taskqueue_stats_hdr(outputStream* const st = gclog_or_tty);
  void print_taskqueue_stats(outputStream* const st = gclog_or_tty) const;
  void reset_taskqueue_stats();
#endif // TASKQUEUE_STATS

770
|