591 ConcurrentMarkSweepPolicy* _collector_policy;
592 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
593
594 void set_did_compact(bool v);
595
596 // XXX Move these to CMSStats ??? FIX ME !!!
597 elapsedTimer _inter_sweep_timer; // Time between sweeps
598 elapsedTimer _intra_sweep_timer; // Time _in_ sweeps
599 // Padded decaying average estimates of the above
600 AdaptivePaddedAverage _inter_sweep_estimate;
601 AdaptivePaddedAverage _intra_sweep_estimate;
602
603 CMSTracer* _gc_tracer_cm;
604 ConcurrentGCTimer* _gc_timer_cm;
605
606 bool _cms_start_registered;
607
608 GCHeapSummary _last_heap_summary;
609 MetaspaceSummary _last_metaspace_summary;
610
611 void register_foreground_gc_start(GCCause::Cause cause);
612 void register_gc_start(GCCause::Cause cause);
613 void register_gc_end();
614 void save_heap_summary();
615 void report_heap_summary(GCWhen::Type when);
616
617 protected:
618 ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
619 MemRegion _span; // Span covering the CMS (old) generation
620 CardTableRS* _ct; // Card table
621
622 // CMS marking support structures
623 CMSBitMap _markBitMap;
624 CMSBitMap _modUnionTable;
625 CMSMarkStack _markStack;
626
627 HeapWord* _restart_addr; // In support of marking stack overflow
628 void lower_restart_addr(HeapWord* low);
629
630 // Counters in support of marking stack / work queue overflow handling:
631 // a non-zero value indicates certain types of overflow events during
678 static CollectorState _collectorState;
679
680 // State related to prologue/epilogue invocation for my generations
681 bool _between_prologue_and_epilogue;
682
683 // Signaling/State related to coordination between fore- and background GC
684 // Note: When the baton has been passed from background GC to foreground GC,
685 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
686 static bool _foregroundGCIsActive; // true iff foreground collector is active or
687 // wants to go active
688 static bool _foregroundGCShouldWait; // true iff background GC is active and has not
689 // yet passed the baton to the foreground GC
690
691 // Support for CMSScheduleRemark (abortable preclean)
692 bool _abort_preclean;
693 bool _start_sampling;
694
695 int _numYields;
696 size_t _numDirtyCards;
697 size_t _sweep_count;
698 // Number of full gc's since the last concurrent gc.
699 uint _full_gcs_since_conc_gc;
700
701 // Occupancy used for bootstrapping stats
702 double _bootstrap_occupancy;
703
704 // Timer
705 elapsedTimer _timer;
706
707 // Timing, allocation and promotion statistics, used for scheduling.
708 CMSStats _stats;
709
710 enum CMS_op_type {
711 CMS_op_checkpointRootsInitial,
712 CMS_op_checkpointRootsFinal
713 };
714
715 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
716 bool stop_world_and_do(CMS_op_type op);
717
718 OopTaskQueueSet* task_queues() { return _task_queues; }
719 int* hash_seed(int i) { return &_hash_seed[i]; }
743 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
744 bool par_take_from_overflow_list(size_t num,
745 OopTaskQueue* to_work_q,
746 int no_of_gc_threads);
747 void push_on_overflow_list(oop p);
748 void par_push_on_overflow_list(oop p);
749 // The following is obviously not, in general, "MT-stable".
750 bool overflow_list_is_empty() const;
751
752 void preserve_mark_if_necessary(oop p);
753 void par_preserve_mark_if_necessary(oop p);
754 void preserve_mark_work(oop p, markOop m);
755 void restore_preserved_marks_if_any();
756 NOT_PRODUCT(bool no_preserved_marks() const;)
757 // In support of testing overflow code
758 NOT_PRODUCT(int _overflow_counter;)
759 NOT_PRODUCT(bool simulate_overflow();) // Sequential
760 NOT_PRODUCT(bool par_simulate_overflow();) // MT version
761
762 // CMS work methods
763 void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
764
765 // A return value of false indicates failure due to stack overflow
766 bool markFromRootsWork(bool asynch); // Concurrent marking work
767
768 public: // FIX ME!!! only for testing
769 bool do_marking_st(bool asynch); // Single-threaded marking
770 bool do_marking_mt(bool asynch); // Multi-threaded marking
771
772 private:
773
774 // Concurrent precleaning work
775 size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
776 ScanMarkedObjectsAgainCarefullyClosure* cl);
777 size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
778 ScanMarkedObjectsAgainCarefullyClosure* cl);
779 // Does precleaning work, returning a quantity indicative of
780 // the amount of "useful work" done.
781 size_t preclean_work(bool clean_refs, bool clean_survivors);
782 void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
783 void abortable_preclean(); // Preclean while looking for possible abort
784 void initialize_sequential_subtasks_for_young_gen_rescan(int i);
785 // Helper function for above; merge-sorts the per-thread plab samples
786 void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
787 // Resets (i.e. clears) the per-thread plab sample vectors
788 void reset_survivor_plab_arrays();
789
790 // Final (second) checkpoint work
791 void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
792 bool init_mark_was_synchronous);
793 // Work routine for parallel version of remark
794 void do_remark_parallel();
795 // Work routine for non-parallel version of remark
796 void do_remark_non_parallel();
797 // Reference processing work routine (during second checkpoint)
798 void refProcessingWork(bool asynch, bool clear_all_soft_refs);
799
800 // Concurrent sweeping work
801 void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
802
803 // (Concurrent) resetting of support data structures
804 void reset(bool asynch);
805
806 // Clear _expansion_cause fields of constituent generations
807 void clear_expansion_cause();
808
809 // An auxiliary method used to record the ends of
810 // used regions of each generation to limit the extent of the sweep
811 void save_sweep_limits();
812
813 // A work method used by the foreground collector to determine
814 // what type of collection (compacting or not, continuing or fresh)
815 // it should do.
816 void decide_foreground_collection_type(bool clear_all_soft_refs,
817 bool* should_compact, bool* should_start_over);
818
819 // A work method used by the foreground collector to do
820 // a mark-sweep-compact.
821 void do_compaction_work(bool clear_all_soft_refs);
822
823 // A work method used by the foreground collector to do
824 // a mark-sweep, after taking over from a possibly on-going
825 // concurrent mark-sweep collection.
826 void do_mark_sweep_work(bool clear_all_soft_refs,
827 CollectorState first_state, bool should_start_over);
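// Taken together with decide_foreground_collection_type() above, the
// foreground path presumably first decides between a compacting and a
// non-compacting collection (and whether to start over), then dispatches
// to do_compaction_work() or do_mark_sweep_work() accordingly.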
828
829 // Work methods for reporting concurrent mode interruption or failure
830 bool is_external_interruption();
831 void report_concurrent_mode_interruption();
832
833 // If the background GC is active, acquire control from the background
834 // GC and do the collection.
835 void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
836
837 // For synchronizing the passing of control from the background to the
838 // foreground GC. waitForForegroundGC() is called by the background
839 // collector. If it had to wait for a foreground collection,
840 // it returns true and the background collector should assume
841 // that the collection was finished by the foreground
842 // collector.
843 bool waitForForegroundGC();
844
845 size_t block_size_using_printezis_bits(HeapWord* addr) const;
846 size_t block_size_if_printezis_bits(HeapWord* addr) const;
847 HeapWord* next_card_start_after_block(HeapWord* addr) const;
848
851 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
852 CardTableRS* ct,
853 ConcurrentMarkSweepPolicy* cp);
854 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
855
856 ReferenceProcessor* ref_processor() { return _ref_processor; }
857 void ref_processor_init();
858
859 Mutex* bitMapLock() const { return _markBitMap.lock(); }
860 static CollectorState abstract_state() { return _collectorState; }
861
862 bool should_abort_preclean() const; // Whether preclean should be aborted.
863 size_t get_eden_used() const;
864 size_t get_eden_capacity() const;
865
866 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
867
868 // Locking checks
869 NOT_PRODUCT(static bool have_cms_token();)
870
871 // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
872 bool shouldConcurrentCollect();
873
874 void collect(bool full,
875 bool clear_all_soft_refs,
876 size_t size,
877 bool tlab);
878 void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
879 void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
880
881 // In support of ExplicitGCInvokesConcurrent
882 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
883 // Should we unload classes in a particular concurrent cycle?
884 bool should_unload_classes() const {
885 return _should_unload_classes;
886 }
887 void update_should_unload_classes();
888
889 void direct_allocated(HeapWord* start, size_t size);
890
891 // Object is dead if not marked and current phase is sweeping.
892 bool is_dead_obj(oop obj) const;
893
894 // After a promotion (of "start"), do any necessary marking.
895 // If "par", then it's being done by a parallel GC thread.
896 // The last two args indicate if we need precise marking
897 // and if so the size of the object so it can be dirtied
898 // in its entirety.
899 void promoted(bool par, HeapWord* start,
911 void gc_epilogue(bool full);
912
913 jlong time_of_last_gc(jlong now) {
914 if (_collectorState <= Idling) {
915 // gc not in progress
916 return _time_of_last_gc;
917 } else {
918 // collection in progress
919 return now;
920 }
921 }
922
923 // Support for parallel remark of survivor space
924 void* get_data_recorder(int thr_num);
925 void sample_eden_chunk();
926
927 CMSBitMap* markBitMap() { return &_markBitMap; }
928 void directAllocated(HeapWord* start, size_t size);
929
930 // Main CMS steps and related support
931 void checkpointRootsInitial(bool asynch);
932 bool markFromRoots(bool asynch); // a return value of false indicates failure
933 // due to stack overflow
934 void preclean();
935 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
936 bool init_mark_was_synchronous);
937 void sweep(bool asynch);
938
939 // Check that the currently executing thread is the expected
940 // one (foreground collector or background collector).
941 static void check_correct_thread_executing() PRODUCT_RETURN;
942 // XXXPERM void print_statistics() PRODUCT_RETURN;
943
944 bool is_cms_reachable(HeapWord* addr);
945
946 // Performance Counter Support
947 CollectorCounters* counters() { return _gc_counters; }
948
949 // Timer stuff
950 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
951 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
952 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
953 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
954
955 int yields() { return _numYields; }
956 void resetYields() { _numYields = 0; }
957 void incrementYields() { _numYields++; }
958 void resetNumDirtyCards() { _numDirtyCards = 0; }
959 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
960 size_t numDirtyCards() { return _numDirtyCards; }
961
962 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
1043 _incremental_collection_failed = true;
1044 }
1045 void clear_incremental_collection_failed() {
1046 _incremental_collection_failed = false;
1047 }
1048
1049 // accessors
1050 void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1051 CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
1052
1053 private:
1054 // For parallel young-gen GC support.
1055 CMSParGCThreadState** _par_gc_thread_states;
1056
1057 // Reason generation was expanded
1058 CMSExpansionCause::Cause _expansion_cause;
1059
1060 // In support of MinChunkSize being larger than min object size
1061 const double _dilatation_factor;
1062
1063 enum CollectionTypes {
1064 Concurrent_collection_type = 0,
1065 MS_foreground_collection_type = 1,
1066 MSC_foreground_collection_type = 2,
1067 Unknown_collection_type = 3
1068 };
1069
1070 CollectionTypes _debug_collection_type;
1071
1072 // True if a compacting collection was done.
1073 bool _did_compact;
1074 bool did_compact() { return _did_compact; }
1075
1076 // Fraction of current occupancy at which to start a CMS collection
1077 // that will collect (at least) this generation.
1078 double _initiating_occupancy;
1079
1080 protected:
1081 // Shrink the generation by the specified size (by shrinking the free list)
1082 void shrink_free_list_by(size_t bytes);
1083
1084 // Update statistics for GC
1085 virtual void update_gc_stats(int level, bool full);
1086
1087 // Maximum available space in the generation
1088 // (including uncommitted space).
1089 size_t max_available() const;
1090
1135
1136 void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1137
1138 // Space enquiries
1139 size_t capacity() const;
1140 size_t used() const;
1141 size_t free() const;
1142 double occupancy() const { return ((double)used())/((double)capacity()); }
1143 size_t contiguous_available() const;
1144 size_t unsafe_max_alloc_nogc() const;
1145
1146 // Overrides
1147 MemRegion used_region() const;
1148 MemRegion used_region_at_save_marks() const;
1149
1150 // Does a "full" (forced) collection invoked on this generation collect
1151 // all younger generations as well? Note that the second conjunct is a
1152 // hack to allow the collection of the younger gen first if the flag is
1153 // set.
1154 virtual bool full_collects_younger_generations() const {
1155 return UseCMSCompactAtFullCollection && !ScavengeBeforeFullGC;
1156 }
1157
1158 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1159
1160 // Support for compaction
1161 CompactibleSpace* first_compaction_space() const;
1162 // Adjust quantities in the generation affected by
1163 // the compaction.
1164 void reset_after_compaction();
1165
1166 // Allocation support
1167 HeapWord* allocate(size_t size, bool tlab);
1168 HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1169 oop promote(oop obj, size_t obj_size);
1170 HeapWord* par_allocate(size_t size, bool tlab) {
1171 return allocate(size, tlab);
1172 }
1173
1174
1175 // Used by CMSStats to track direct allocation. The value is sampled and
1279 // Delegate to the collector
1280 return collector()->sample_eden_chunk();
1281 }
1282
1283 // Printing
1284 const char* name() const;
1285 virtual const char* short_name() const { return "CMS"; }
1286 void print() const;
1287 void printOccupancy(const char* s);
1288 bool must_be_youngest() const { return false; }
1289 bool must_be_oldest() const { return true; }
1290
1291 // Resize the generation after a compacting GC. The
1292 // generation can be treated as a contiguous space
1293 // after the compaction.
1294 virtual void compute_new_size();
1295 // Resize the generation after a non-compacting
1296 // collection.
1297 void compute_new_size_free_list();
1298
1299 CollectionTypes debug_collection_type() { return _debug_collection_type; }
1300 void rotate_debug_collection_type();
1301 };
1302
1303 //
1304 // Closures of various sorts used by CMS to accomplish its work
1305 //
1306
1307 // This closure is used to do concurrent marking from the roots
1308 // following the first checkpoint.
1309 class MarkFromRootsClosure: public BitMapClosure {
1310 CMSCollector* _collector;
1311 MemRegion _span;
1312 CMSBitMap* _bitMap;
1313 CMSBitMap* _mut;
1314 CMSMarkStack* _markStack;
1315 bool _yield;
1316 int _skipBits;
1317 HeapWord* _finger;
1318 HeapWord* _threshold;
1319 DEBUG_ONLY(bool _verifying;)
1327 void reset(HeapWord* addr);
1328 inline void do_yield_check();
1329
1330 private:
1331 void scanOopsInOop(HeapWord* ptr);
1332 void do_yield_work();
1333 };
1334
1335 // This closure is used to do concurrent multi-threaded
1336 // marking from the roots following the first checkpoint.
1337 // XXX This should really be a subclass of the serial version
1338 // above, but I have not had the time to refactor things cleanly.
1339 class Par_MarkFromRootsClosure: public BitMapClosure {
1340 CMSCollector* _collector;
1341 MemRegion _whole_span;
1342 MemRegion _span;
1343 CMSBitMap* _bit_map;
1344 CMSBitMap* _mut;
1345 OopTaskQueue* _work_queue;
1346 CMSMarkStack* _overflow_stack;
1347 bool _yield;
1348 int _skip_bits;
1349 HeapWord* _finger;
1350 HeapWord* _threshold;
1351 CMSConcMarkingTask* _task;
1352 public:
1353 Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
1354 MemRegion span,
1355 CMSBitMap* bit_map,
1356 OopTaskQueue* work_queue,
1357 CMSMarkStack* overflow_stack,
1358 bool should_yield);
1359 bool do_bit(size_t offset);
1360 inline void do_yield_check();
1361
1362 private:
1363 void scan_oops_in_oop(HeapWord* ptr);
1364 void do_yield_work();
1365 bool get_work_from_overflow_stack();
1366 };
1367
1368 // The following closures are used to do certain kinds of verification of
1369 // CMS marking.
1370 class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
1371 CMSCollector* _collector;
1372 MemRegion _span;
1373 CMSBitMap* _verification_bm;
1374 CMSBitMap* _cms_bm;
1375 CMSMarkStack* _mark_stack;
1376 protected:
1377 void do_oop(oop p);
1378 template <class T> inline void do_oop_work(T *p) {
|
591 ConcurrentMarkSweepPolicy* _collector_policy;
592 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
593
594 void set_did_compact(bool v);
595
596 // XXX Move these to CMSStats ??? FIX ME !!!
597 elapsedTimer _inter_sweep_timer; // Time between sweeps
598 elapsedTimer _intra_sweep_timer; // Time _in_ sweeps
599 // Padded decaying average estimates of the above
600 AdaptivePaddedAverage _inter_sweep_estimate;
601 AdaptivePaddedAverage _intra_sweep_estimate;
602
603 CMSTracer* _gc_tracer_cm;
604 ConcurrentGCTimer* _gc_timer_cm;
605
606 bool _cms_start_registered;
607
608 GCHeapSummary _last_heap_summary;
609 MetaspaceSummary _last_metaspace_summary;
610
611 void register_gc_start(GCCause::Cause cause);
612 void register_gc_end();
613 void save_heap_summary();
614 void report_heap_summary(GCWhen::Type when);
615
616 protected:
617 ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
618 MemRegion _span; // Span covering the CMS (old) generation
619 CardTableRS* _ct; // Card table
620
621 // CMS marking support structures
622 CMSBitMap _markBitMap;
623 CMSBitMap _modUnionTable;
624 CMSMarkStack _markStack;
625
626 HeapWord* _restart_addr; // In support of marking stack overflow
627 void lower_restart_addr(HeapWord* low);
628
629 // Counters in support of marking stack / work queue overflow handling:
630 // a non-zero value indicates certain types of overflow events during
677 static CollectorState _collectorState;
678
679 // State related to prologue/epilogue invocation for my generations
680 bool _between_prologue_and_epilogue;
681
682 // Signaling/State related to coordination between fore- and background GC
683 // Note: When the baton has been passed from background GC to foreground GC,
684 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
685 static bool _foregroundGCIsActive; // true iff foreground collector is active or
686 // wants to go active
687 static bool _foregroundGCShouldWait; // true iff background GC is active and has not
688 // yet passed the baton to the foreground GC
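// Rough sketch of the hand-off: while a background cycle is running it
// keeps _foregroundGCShouldWait set; a foreground (stop-the-world) request
// sets _foregroundGCIsActive and then waits (see
// acquire_control_and_collect() below) until the background collector
// notices, clears _foregroundGCShouldWait and yields the baton, after
// which the foreground collector finishes the collection.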
689
690 // Support for CMSScheduleRemark (abortable preclean)
691 bool _abort_preclean;
692 bool _start_sampling;
693
694 int _numYields;
695 size_t _numDirtyCards;
696 size_t _sweep_count;
697
698 // Occupancy used for bootstrapping stats
699 double _bootstrap_occupancy;
700
701 // Timer
702 elapsedTimer _timer;
703
704 // Timing, allocation and promotion statistics, used for scheduling.
705 CMSStats _stats;
706
707 enum CMS_op_type {
708 CMS_op_checkpointRootsInitial,
709 CMS_op_checkpointRootsFinal
710 };
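// These ops correspond to the collector's two stop-the-world pauses (the
// initial checkpoint and the final remark) and are presumably executed
// via stop_world_and_do() / do_CMS_operation() below.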
711
712 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
713 bool stop_world_and_do(CMS_op_type op);
714
715 OopTaskQueueSet* task_queues() { return _task_queues; }
716 int* hash_seed(int i) { return &_hash_seed[i]; }
740 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
741 bool par_take_from_overflow_list(size_t num,
742 OopTaskQueue* to_work_q,
743 int no_of_gc_threads);
744 void push_on_overflow_list(oop p);
745 void par_push_on_overflow_list(oop p);
746 // The following is obviously not, in general, "MT-stable".
747 bool overflow_list_is_empty() const;
748
749 void preserve_mark_if_necessary(oop p);
750 void par_preserve_mark_if_necessary(oop p);
751 void preserve_mark_work(oop p, markOop m);
752 void restore_preserved_marks_if_any();
753 NOT_PRODUCT(bool no_preserved_marks() const;)
754 // In support of testing overflow code
755 NOT_PRODUCT(int _overflow_counter;)
756 NOT_PRODUCT(bool simulate_overflow();) // Sequential
757 NOT_PRODUCT(bool par_simulate_overflow();) // MT version
758
759 // CMS work methods
760 void checkpointRootsInitialWork(); // Initial checkpoint work
761
762 // A return value of false indicates failure due to stack overflow
763 bool markFromRootsWork(); // Concurrent marking work
764
765 public: // FIX ME!!! only for testing
766 bool do_marking_st(); // Single-threaded marking
767 bool do_marking_mt(); // Multi-threaded marking
768
769 private:
770
771 // Concurrent precleaning work
772 size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
773 ScanMarkedObjectsAgainCarefullyClosure* cl);
774 size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
775 ScanMarkedObjectsAgainCarefullyClosure* cl);
776 // Does precleaning work, returning a quantity indicative of
777 // the amount of "useful work" done.
778 size_t preclean_work(bool clean_refs, bool clean_survivors);
779 void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
780 void abortable_preclean(); // Preclean while looking for possible abort
781 void initialize_sequential_subtasks_for_young_gen_rescan(int i);
782 // Helper function for above; merge-sorts the per-thread plab samples
783 void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
784 // Resets (i.e. clears) the per-thread plab sample vectors
785 void reset_survivor_plab_arrays();
786
787 // Final (second) checkpoint work
788 void checkpointRootsFinalWork();
789 // Work routine for parallel version of remark
790 void do_remark_parallel();
791 // Work routine for non-parallel version of remark
792 void do_remark_non_parallel();
793 // Reference processing work routine (during second checkpoint)
794 void refProcessingWork();
795
796 // Concurrent sweeping work
797 void sweepWork(ConcurrentMarkSweepGeneration* gen);
798
799 // (Concurrent) resetting of support data structures
800 void reset(bool concurrent);
801
802 // Clear _expansion_cause fields of constituent generations
803 void clear_expansion_cause();
804
805 // An auxiliary method used to record the ends of
806 // used regions of each generation to limit the extent of the sweep
807 void save_sweep_limits();
808
809 // A work method used by the foreground collector to do
810 // a mark-sweep-compact.
811 void do_compaction_work(bool clear_all_soft_refs);
812
813 // Work methods for reporting concurrent mode interruption or failure
814 bool is_external_interruption();
815 void report_concurrent_mode_interruption();
816
817 // If the background GC is active, acquire control from the background
818 // GC and do the collection.
819 void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
820
821 // For synchronizing the passing of control from the background to the
822 // foreground GC. waitForForegroundGC() is called by the background
823 // collector. If it had to wait for a foreground collection,
824 // it returns true and the background collector should assume
825 // that the collection was finished by the foreground
826 // collector.
827 bool waitForForegroundGC();
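// Illustrative use by the background collector (a sketch based on the
// comment above, not the actual call site):
//
//   if (waitForForegroundGC()) {
//     // A foreground collection completed the cycle on our behalf;
//     // skip the remaining background phases.
//     return;
//   }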
828
829 size_t block_size_using_printezis_bits(HeapWord* addr) const;
830 size_t block_size_if_printezis_bits(HeapWord* addr) const;
831 HeapWord* next_card_start_after_block(HeapWord* addr) const;
832
835 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
836 CardTableRS* ct,
837 ConcurrentMarkSweepPolicy* cp);
838 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
839
840 ReferenceProcessor* ref_processor() { return _ref_processor; }
841 void ref_processor_init();
842
843 Mutex* bitMapLock() const { return _markBitMap.lock(); }
844 static CollectorState abstract_state() { return _collectorState; }
845
846 bool should_abort_preclean() const; // Whether preclean should be aborted.
847 size_t get_eden_used() const;
848 size_t get_eden_capacity() const;
849
850 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
851
852 // Locking checks
853 NOT_PRODUCT(static bool have_cms_token();)
854
855 bool shouldConcurrentCollect();
856
857 void collect(bool full,
858 bool clear_all_soft_refs,
859 size_t size,
860 bool tlab);
861 void collect_in_background(GCCause::Cause cause);
862
863 // In support of ExplicitGCInvokesConcurrent
864 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
865 // Should we unload classes in a particular concurrent cycle?
866 bool should_unload_classes() const {
867 return _should_unload_classes;
868 }
869 void update_should_unload_classes();
870
871 void direct_allocated(HeapWord* start, size_t size);
872
873 // Object is dead if not marked and current phase is sweeping.
874 bool is_dead_obj(oop obj) const;
875
876 // After a promotion (of "start"), do any necessary marking.
877 // If "par", then it's being done by a parallel GC thread.
878 // The last two args indicate if we need precise marking
879 // and if so the size of the object so it can be dirtied
880 // in its entirety.
881 void promoted(bool par, HeapWord* start,
893 void gc_epilogue(bool full);
894
895 jlong time_of_last_gc(jlong now) {
896 if (_collectorState <= Idling) {
897 // gc not in progress
898 return _time_of_last_gc;
899 } else {
900 // collection in progress
901 return now;
902 }
903 }
904
905 // Support for parallel remark of survivor space
906 void* get_data_recorder(int thr_num);
907 void sample_eden_chunk();
908
909 CMSBitMap* markBitMap() { return &_markBitMap; }
910 void directAllocated(HeapWord* start, size_t size);
911
912 // Main CMS steps and related support
913 void checkpointRootsInitial();
914 bool markFromRoots(); // a return value of false indicates failure
915 // due to stack overflow
916 void preclean();
917 void checkpointRootsFinal();
918 void sweep();
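// Roughly, a concurrent cycle runs these steps in order:
// checkpointRootsInitial() (pause), markFromRoots() (concurrent),
// preclean() / abortable_preclean(), checkpointRootsFinal() (remark
// pause), sweep() (concurrent), and finally reset() to clear the
// collector's support data structures.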
919
920 // Check that the currently executing thread is the expected
921 // one (foreground collector or background collector).
922 static void check_correct_thread_executing() PRODUCT_RETURN;
923
924 bool is_cms_reachable(HeapWord* addr);
925
926 // Performance Counter Support
927 CollectorCounters* counters() { return _gc_counters; }
928
929 // Timer stuff
930 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
931 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
932 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
933 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
934
935 int yields() { return _numYields; }
936 void resetYields() { _numYields = 0; }
937 void incrementYields() { _numYields++; }
938 void resetNumDirtyCards() { _numDirtyCards = 0; }
939 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
940 size_t numDirtyCards() { return _numDirtyCards; }
941
942 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
1023 _incremental_collection_failed = true;
1024 }
1025 void clear_incremental_collection_failed() {
1026 _incremental_collection_failed = false;
1027 }
1028
1029 // accessors
1030 void set_expansion_cause(CMSExpansionCause::Cause v) { _expansion_cause = v;}
1031 CMSExpansionCause::Cause expansion_cause() const { return _expansion_cause; }
1032
1033 private:
1034 // For parallel young-gen GC support.
1035 CMSParGCThreadState** _par_gc_thread_states;
1036
1037 // Reason generation was expanded
1038 CMSExpansionCause::Cause _expansion_cause;
1039
1040 // In support of MinChunkSize being larger than min object size
1041 const double _dilatation_factor;
1042
1043 bool _debug_concurrent_cycle;
1044
1045 // True if a compacting collection was done.
1046 bool _did_compact;
1047 bool did_compact() { return _did_compact; }
1048
1049 // Fraction of current occupancy at which to start a CMS collection
1050 // that will collect (at least) this generation.
1051 double _initiating_occupancy;
1052
1053 protected:
1054 // Shrink the generation by the specified size (by shrinking the free list)
1055 void shrink_free_list_by(size_t bytes);
1056
1057 // Update statistics for GC
1058 virtual void update_gc_stats(int level, bool full);
1059
1060 // Maximum available space in the generation
1061 // (including uncommitted space).
1062 size_t max_available() const;
1063
1108
1109 void clear_expansion_cause() { _expansion_cause = CMSExpansionCause::_no_expansion; }
1110
1111 // Space enquiries
1112 size_t capacity() const;
1113 size_t used() const;
1114 size_t free() const;
1115 double occupancy() const { return ((double)used())/((double)capacity()); }
1116 size_t contiguous_available() const;
1117 size_t unsafe_max_alloc_nogc() const;
1118
1119 // Overrides
1120 MemRegion used_region() const;
1121 MemRegion used_region_at_save_marks() const;
1122
1123 // Does a "full" (forced) collection invoked on this generation collect
1124 // all younger generations as well? Note that this is a
1125 // hack to allow the collection of the younger gen first if the flag is
1126 // set.
1127 virtual bool full_collects_younger_generations() const {
1128 return !ScavengeBeforeFullGC;
1129 }
1130
1131 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1132
1133 // Support for compaction
1134 CompactibleSpace* first_compaction_space() const;
1135 // Adjust quantities in the generation affected by
1136 // the compaction.
1137 void reset_after_compaction();
1138
1139 // Allocation support
1140 HeapWord* allocate(size_t size, bool tlab);
1141 HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1142 oop promote(oop obj, size_t obj_size);
1143 HeapWord* par_allocate(size_t size, bool tlab) {
1144 return allocate(size, tlab);
1145 }
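// Note: par_allocate() can simply forward to allocate() here presumably
// because the underlying allocation path takes the free-list lock itself
// (see have_lock_and_allocate() above).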
1146
1147
1148 // Used by CMSStats to track direct allocation. The value is sampled and
1252 // Delegate to the collector
1253 return collector()->sample_eden_chunk();
1254 }
1255
1256 // Printing
1257 const char* name() const;
1258 virtual const char* short_name() const { return "CMS"; }
1259 void print() const;
1260 void printOccupancy(const char* s);
1261 bool must_be_youngest() const { return false; }
1262 bool must_be_oldest() const { return true; }
1263
1264 // Resize the generation after a compacting GC. The
1265 // generation can be treated as a contiguous space
1266 // after the compaction.
1267 virtual void compute_new_size();
1268 // Resize the generation after a non-compacting
1269 // collection.
1270 void compute_new_size_free_list();
1271
1272 bool debug_concurrent_cycle() { return _debug_concurrent_cycle; }
1273 void rotate_debug_collection_type();
1274 };
1275
1276 //
1277 // Closures of various sorts used by CMS to accomplish its work
1278 //
1279
1280 // This closure is used to do concurrent marking from the roots
1281 // following the first checkpoint.
1282 class MarkFromRootsClosure: public BitMapClosure {
1283 CMSCollector* _collector;
1284 MemRegion _span;
1285 CMSBitMap* _bitMap;
1286 CMSBitMap* _mut;
1287 CMSMarkStack* _markStack;
1288 bool _yield;
1289 int _skipBits;
1290 HeapWord* _finger;
1291 HeapWord* _threshold;
1292 DEBUG_ONLY(bool _verifying;)
1300 void reset(HeapWord* addr);
1301 inline void do_yield_check();
1302
1303 private:
1304 void scanOopsInOop(HeapWord* ptr);
1305 void do_yield_work();
1306 };
1307
1308 // This closure is used to do concurrent multi-threaded
1309 // marking from the roots following the first checkpoint.
1310 // XXX This should really be a subclass of the serial version
1311 // above, but I have not had the time to refactor things cleanly.
1312 class Par_MarkFromRootsClosure: public BitMapClosure {
1313 CMSCollector* _collector;
1314 MemRegion _whole_span;
1315 MemRegion _span;
1316 CMSBitMap* _bit_map;
1317 CMSBitMap* _mut;
1318 OopTaskQueue* _work_queue;
1319 CMSMarkStack* _overflow_stack;
1320 int _skip_bits;
1321 HeapWord* _finger;
1322 HeapWord* _threshold;
1323 CMSConcMarkingTask* _task;
1324 public:
1325 Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
1326 MemRegion span,
1327 CMSBitMap* bit_map,
1328 OopTaskQueue* work_queue,
1329 CMSMarkStack* overflow_stack);
1330 bool do_bit(size_t offset);
1331 inline void do_yield_check();
1332
1333 private:
1334 void scan_oops_in_oop(HeapWord* ptr);
1335 void do_yield_work();
1336 bool get_work_from_overflow_stack();
1337 };
1338
1339 // The following closures are used to do certain kinds of verification of
1340 // CMS marking.
1341 class PushAndMarkVerifyClosure: public MetadataAwareOopClosure {
1342 CMSCollector* _collector;
1343 MemRegion _span;
1344 CMSBitMap* _verification_bm;
1345 CMSBitMap* _cms_bm;
1346 CMSMarkStack* _mark_stack;
1347 protected:
1348 void do_oop(oop p);
1349 template <class T> inline void do_oop_work(T *p) {
|