600 elapsedTimer _intra_sweep_timer; // Time _in_ sweeps
601 // Padded decaying average estimates of the above
602 AdaptivePaddedAverage _inter_sweep_estimate;
603 AdaptivePaddedAverage _intra_sweep_estimate;
604
// Tracing/timing support for GC event reporting.
// NOTE(review): presumably feeds the JFR/GC-tracing framework -- confirm against CMSTracer.
605 CMSTracer* _gc_tracer_cm;
606 ConcurrentGCTimer* _gc_timer_cm;
607
// NOTE(review): presumably set by register_gc_start() and cleared by
// register_gc_end() below -- confirm in the implementation file.
608 bool _cms_start_registered;
609
// Heap/metaspace snapshots; presumably captured by save_heap_summary() and
// emitted by report_heap_summary() -- confirm in the implementation file.
610 GCHeapSummary _last_heap_summary;
611 MetaspaceSummary _last_metaspace_summary;
612
613 void register_gc_start(GCCause::Cause cause);
614 void register_gc_end();
615 void save_heap_summary();
616 void report_heap_summary(GCWhen::Type when);
617
618 protected:
619 ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
620 MemRegion _span; // Span covering above two
621 CardTableRS* _ct; // Card table
622
623 // CMS marking support structures
624 CMSBitMap _markBitMap;
625 CMSBitMap _modUnionTable;
626 CMSMarkStack _markStack;
627
628 HeapWord* _restart_addr; // In support of marking stack overflow
629 void lower_restart_addr(HeapWord* low);
630
631 // Counters in support of marking stack / work queue overflow handling:
632 // a non-zero value indicates certain types of overflow events during
633 // the current CMS cycle and could lead to stack resizing efforts at
634 // an opportune future time.
// Naming: "ser"/"par" = serial/parallel phase. NOTE(review): "pmc" and "kac"
// presumably abbreviate the closures involved (push/mark closure, keep-alive
// closure) -- confirm against the closure class definitions.
635 size_t _ser_pmc_preclean_ovflw;
636 size_t _ser_pmc_remark_ovflw;
637 size_t _par_pmc_remark_ovflw;
638 size_t _ser_kac_preclean_ovflw;
639 size_t _ser_kac_ovflw;
640 size_t _par_kac_ovflw;
// Debug-only (compiled out in product builds by NOT_PRODUCT).
641 NOT_PRODUCT(ssize_t _num_par_pushes;)
642
643 // ("Weak") Reference processing support.
644 SpanReferenceProcessor* _ref_processor;
645 CMSIsAliveClosure _is_alive_closure;
646 // Keep this textually after _markBitMap and _span; c'tor dependency.
647
648 ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work
649 ModUnionClosurePar _modUnionClosurePar;
650
651 // CMS abstract state machine
652 // initial_state: Idling
653 // next_state(Idling) = {Marking}
654 // next_state(Marking) = {Precleaning, Sweeping}
655 // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
656 // next_state(AbortablePreclean) = {FinalMarking}
657 // next_state(FinalMarking) = {Sweeping}
658 // next_state(Sweeping) = {Resizing}
659 // next_state(Resizing) = {Resetting}
660 // next_state(Resetting) = {Idling}
661 // The numeric values below are chosen so that:
662 // . _collectorState <= Idling == post-sweep && pre-mark
663 // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
664 // precleaning || abortablePreclean
// NOTE(review): original lines 665-823 are elided from this excerpt
// (the embedded numbering jumps from 664 to 824).
824
825 // For synchronizing passing of control from background to foreground
826 // GC. waitForForegroundGC() is called by the background
827 // collector. If it had to wait for a foreground collection,
828 // it returns true and the background collection should assume
829 // that the collection was finished by the foreground
830 // collector.
831 bool waitForForegroundGC();
832
// "Printezis bits": NOTE(review): presumably the marking-bitmap encoding CMS
// uses to recover object sizes in unparsable regions -- confirm against the
// bitmap code before relying on this description.
833 size_t block_size_using_printezis_bits(HeapWord* addr) const;
834 size_t block_size_if_printezis_bits(HeapWord* addr) const;
835 HeapWord* next_card_start_after_block(HeapWord* addr) const;
836
837 void setup_cms_unloading_and_verification_state();
838 public:
839 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
840 CardTableRS* ct,
841 ConcurrentMarkSweepPolicy* cp);
// Accessor for the dedicated CMS worker thread declared above.
842 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
843
844 SpanReferenceProcessor* ref_processor() { return _ref_processor; }
845 void ref_processor_init();
846
// Lock protecting the marking bit map (delegates to _markBitMap).
847 Mutex* bitMapLock() const { return _markBitMap.lock(); }
// Current collector state; static, so there is one state machine process-wide.
848 static CollectorState abstract_state() { return _collectorState; }
849
850 bool should_abort_preclean() const; // Whether preclean should be aborted.
851 size_t get_eden_used() const;
852 size_t get_eden_capacity() const;
853
854 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
855
856 // Locking checks
857 NOT_PRODUCT(static bool have_cms_token();)
858
859 bool shouldConcurrentCollect();
860
861 void collect(bool full,
862 bool clear_all_soft_refs,
863 size_t size,
864 bool tlab);
|
600 elapsedTimer _intra_sweep_timer; // Time _in_ sweeps
601 // Padded decaying average estimates of the above
602 AdaptivePaddedAverage _inter_sweep_estimate;
603 AdaptivePaddedAverage _intra_sweep_estimate;
604
// Tracing/timing support for GC event reporting.
// NOTE(review): presumably feeds the JFR/GC-tracing framework -- confirm against CMSTracer.
605 CMSTracer* _gc_tracer_cm;
606 ConcurrentGCTimer* _gc_timer_cm;
607
// NOTE(review): presumably set by register_gc_start() and cleared by
// register_gc_end() below -- confirm in the implementation file.
608 bool _cms_start_registered;
609
// Heap/metaspace snapshots; presumably captured by save_heap_summary() and
// emitted by report_heap_summary() -- confirm in the implementation file.
610 GCHeapSummary _last_heap_summary;
611 MetaspaceSummary _last_metaspace_summary;
612
613 void register_gc_start(GCCause::Cause cause);
614 void register_gc_end();
615 void save_heap_summary();
616 void report_heap_summary(GCWhen::Type when);
617
618 protected:
619 ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
620 MemRegion _span; // Span covering above
621 CardTableRS* _ct; // Card table
622
623 // CMS marking support structures
624 CMSBitMap _markBitMap;
625 CMSBitMap _modUnionTable;
626 CMSMarkStack _markStack;
627
628 HeapWord* _restart_addr; // In support of marking stack overflow
629 void lower_restart_addr(HeapWord* low);
630
631 // Counters in support of marking stack / work queue overflow handling:
632 // a non-zero value indicates certain types of overflow events during
633 // the current CMS cycle and could lead to stack resizing efforts at
634 // an opportune future time.
// Naming: "ser"/"par" = serial/parallel phase. NOTE(review): "pmc" and "kac"
// presumably abbreviate the closures involved (push/mark closure, keep-alive
// closure) -- confirm against the closure class definitions.
635 size_t _ser_pmc_preclean_ovflw;
636 size_t _ser_pmc_remark_ovflw;
637 size_t _par_pmc_remark_ovflw;
638 size_t _ser_kac_preclean_ovflw;
639 size_t _ser_kac_ovflw;
640 size_t _par_kac_ovflw;
// Debug-only (compiled out in product builds by NOT_PRODUCT).
641 NOT_PRODUCT(ssize_t _num_par_pushes;)
642
643 // ("Weak") Reference processing support.
// Closure limiting reference discovery to a span; its span is exposed via
// ref_processor_span() in the public section below.
644 SpanSubjectToDiscoveryClosure _span_discoverer;
645 ReferenceProcessor* _ref_processor;
646 CMSIsAliveClosure _is_alive_closure;
647 // Keep this textually after _markBitMap and _span; c'tor dependency.
648
649 ConcurrentMarkSweepThread* _cmsThread; // The thread doing the work
650 ModUnionClosurePar _modUnionClosurePar;
651
652 // CMS abstract state machine
653 // initial_state: Idling
654 // next_state(Idling) = {Marking}
655 // next_state(Marking) = {Precleaning, Sweeping}
656 // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
657 // next_state(AbortablePreclean) = {FinalMarking}
658 // next_state(FinalMarking) = {Sweeping}
659 // next_state(Sweeping) = {Resizing}
660 // next_state(Resizing) = {Resetting}
661 // next_state(Resetting) = {Idling}
662 // The numeric values below are chosen so that:
663 // . _collectorState <= Idling == post-sweep && pre-mark
664 // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
665 // precleaning || abortablePreclean
// NOTE(review): original lines 666-824 are elided from this excerpt
// (the embedded numbering jumps from 665 to 825).
825
826 // For synchronizing passing of control from background to foreground
827 // GC. waitForForegroundGC() is called by the background
828 // collector. If it had to wait for a foreground collection,
829 // it returns true and the background collection should assume
830 // that the collection was finished by the foreground
831 // collector.
832 bool waitForForegroundGC();
833
// "Printezis bits": NOTE(review): presumably the marking-bitmap encoding CMS
// uses to recover object sizes in unparsable regions -- confirm against the
// bitmap code before relying on this description.
834 size_t block_size_using_printezis_bits(HeapWord* addr) const;
835 size_t block_size_if_printezis_bits(HeapWord* addr) const;
836 HeapWord* next_card_start_after_block(HeapWord* addr) const;
837
838 void setup_cms_unloading_and_verification_state();
839 public:
840 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
841 CardTableRS* ct,
842 ConcurrentMarkSweepPolicy* cp);
// Accessor for the dedicated CMS worker thread declared above.
843 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
844
// Span within which references are subject to discovery (from _span_discoverer).
845 MemRegion ref_processor_span() const { return _span_discoverer.span(); }
846 ReferenceProcessor* ref_processor() { return _ref_processor; }
847 void ref_processor_init();
848
// Lock protecting the marking bit map (delegates to _markBitMap).
849 Mutex* bitMapLock() const { return _markBitMap.lock(); }
// Current collector state; static, so there is one state machine process-wide.
850 static CollectorState abstract_state() { return _collectorState; }
851
852 bool should_abort_preclean() const; // Whether preclean should be aborted.
853 size_t get_eden_used() const;
854 size_t get_eden_capacity() const;
855
856 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
857
858 // Locking checks
859 NOT_PRODUCT(static bool have_cms_token();)
860
861 bool shouldConcurrentCollect();
862
863 void collect(bool full,
864 bool clear_all_soft_refs,
865 size_t size,
866 bool tlab);
|