154 // conversion utilities
155 HeapWord* offsetToHeapWord(size_t offset) const;
156 size_t heapWordToOffset(HeapWord* addr) const;
157 size_t heapWordDiffToOffsetDiff(size_t diff) const;
158
159 void print_on_error(outputStream* st, const char* prefix) const;
160
161 // debugging
162 // is this address range covered by the bit-map?
163 NOT_PRODUCT(
164 bool covers(MemRegion mr) const;
165 bool covers(HeapWord* start, size_t size = 0) const;
166 )
167 void verifyNoOneBitsInRange(HeapWord* left, HeapWord* right) PRODUCT_RETURN;
168 };
169
170 // Represents a marking stack used by the CMS collector.
171 // Ideally this should be GrowableArray<> just like MSC's marking stack(s).
172 class CMSMarkStack: public CHeapObj<mtGC> {
173 //
174 friend class CMSCollector; // To get at expansion stats further below.
175 //
176
177 VirtualSpace _virtual_space; // Space for the stack
178 oop* _base; // Bottom of stack
179 size_t _index; // One more than last occupied index
180 size_t _capacity; // Max #elements
181 Mutex _par_lock; // An advisory lock used in case of parallel access
182 NOT_PRODUCT(size_t _max_depth;) // Max depth plumbed during run
183
184 protected:
185 size_t _hit_limit; // We hit max stack size limit
186 size_t _failed_double; // We failed expansion before hitting limit
187
188 public:
189 CMSMarkStack():
190 _par_lock(Mutex::event, "CMSMarkStack._par_lock", true),
191 _hit_limit(0),
192 _failed_double(0) {}
193
194 bool allocate(size_t size);
195
196 size_t capacity() const { return _capacity; }
197
198 oop pop() {
199 if (!isEmpty()) {
200 return _base[--_index] ;
201 }
202 return NULL;
203 }
204
205 bool push(oop ptr) {
206 if (isFull()) {
221 size_t length() { return _index; }
222
223 // "Parallel versions" of some of the above
224 oop par_pop() {
225 // lock and pop
226 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
227 return pop();
228 }
229
230 bool par_push(oop ptr) {
231 // lock and push
232 MutexLockerEx x(&_par_lock, Mutex::_no_safepoint_check_flag);
233 return push(ptr);
234 }
235
236 // Forcibly reset the stack, losing all of its contents.
237 void reset() {
238 _index = 0;
239 }
240
241 // Expand the stack, typically in response to an overflow condition.
242 void expand();
243
244 // Compute the least valued stack element.
245 oop least_value(HeapWord* low) {
246 oop least = (oop)low;
247 for (size_t i = 0; i < _index; i++) {
248 least = MIN2(least, _base[i]);
249 }
250 return least;
251 }
252
253 // Exposed here to allow stack expansion in || case.
254 Mutex* par_lock() { return &_par_lock; }
255 };
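// Usage sketch (illustrative only, not part of this header): a marking
// thread pushes grey oops and later drains them; parallel workers use the
// par_* variants, which serialize on the advisory _par_lock:
//
//   CMSMarkStack stack;
//   if (!stack.allocate(MarkStackSize)) {   // MarkStackSize: VM flag, assumed
//     return;                               // could not reserve backing space
//   }
//   if (!stack.par_push(obj)) {
//     // Stack overflow: the caller must note the overflow and recover
//     // later, e.g. via the collector's overflow list (see CMSCollector).
//   }
//   for (oop o = stack.par_pop(); o != NULL; o = stack.par_pop()) {
//     // ... trace o ...
//   }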
256
257 class CardTableRS;
258 class CMSParGCThreadState;
259
260 class ModUnionClosure: public MemRegionClosure {
261 protected:
262 CMSBitMap* _t;
263 public:
264 ModUnionClosure(CMSBitMap* t): _t(t) { }
265 void do_MemRegion(MemRegion mr);
266 };
267
268 class ModUnionClosurePar: public ModUnionClosure {
269 public:
270 ModUnionClosurePar(CMSBitMap* t): ModUnionClosure(t) { }
271 void do_MemRegion(MemRegion mr);
272 };
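// Illustrative sketch: the collector uses closures like these to record,
// in a CMSBitMap (the mod union table), regions dirtied while concurrent
// work is in progress; the Par variant is assumed to use the parallel-safe
// bit-map operations. A hypothetical call site:
//
//   ModUnionClosure cl(&_modUnionTable);
//   cl.do_MemRegion(dirty_mr);   // dirty_mr: a MemRegion found dirty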
273
540 friend class CMSInnerParMarkAndPushClosure; // -- ditto --
541 NOT_PRODUCT(friend class ScanMarkedObjectsAgainClosure;) // assertion on _overflow_list
542 friend class ReleaseForegroundGC; // to access _foregroundGCShouldWait
543 friend class VM_CMS_Operation;
544 friend class VM_CMS_Initial_Mark;
545 friend class VM_CMS_Final_Remark;
546 friend class TraceCMSMemoryManagerStats;
547
548 private:
549 jlong _time_of_last_gc;
550 void update_time_of_last_gc(jlong now) {
551 _time_of_last_gc = now;
552 }
553
554 OopTaskQueueSet* _task_queues;
555
556 // Overflow list of grey objects, threaded through mark-word
557 // Manipulated with CAS in the parallel/multi-threaded case.
558 oop _overflow_list;
559 // The following array-pair keeps track of mark words
560 // displaced for accommodating overflow list above.
561 // This code will likely be revisited under RFE#4922830.
562 Stack<oop, mtGC> _preserved_oop_stack;
563 Stack<markOop, mtGC> _preserved_mark_stack;
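// Conceptual sketch (the real sequencing lives in the .cpp file): an oop
// threaded onto _overflow_list has its mark word reused as the list link,
// so any non-trivial mark must first be saved and is reinstalled after
// the list has been drained:
//
//   preserve_mark_if_necessary(p);      // may push p and p->mark() above
//   // ... p's mark word now holds the overflow-list link ...
//   restore_preserved_marks_if_any();   // pops both stacks in lock-step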
564
565 int* _hash_seed;
566
567 // In support of multi-threaded concurrent phases
568 YieldingFlexibleWorkGang* _conc_workers;
569
570 // Performance Counters
571 CollectorCounters* _gc_counters;
572
573 // Initialization Errors
574 bool _completed_initialization;
575
576 // In support of ExplicitGCInvokesConcurrent
577 static bool _full_gc_requested;
578 static GCCause::Cause _full_gc_cause;
579 unsigned int _collection_count_start;
580
582 bool _should_unload_classes;
583 unsigned int _concurrent_cycles_since_last_unload;
584 unsigned int concurrent_cycles_since_last_unload() const {
585 return _concurrent_cycles_since_last_unload;
586 }
587 // Did we (allow) unload classes in the previous concurrent cycle?
588 bool unloaded_classes_last_cycle() const {
589 return concurrent_cycles_since_last_unload() == 0;
590 }
591 // Root scanning options for perm gen
592 int _roots_scanning_options;
593 int roots_scanning_options() const { return _roots_scanning_options; }
594 void add_root_scanning_option(int o) { _roots_scanning_options |= o; }
595 void remove_root_scanning_option(int o) { _roots_scanning_options &= ~o; }
596
597 // Verification support
598 CMSBitMap _verification_mark_bm;
599 void verify_after_remark_work_1();
600 void verify_after_remark_work_2();
601
602 // True if any verification flag is on.
603 bool _verifying;
604 bool verifying() const { return _verifying; }
605 void set_verifying(bool v) { _verifying = v; }
606
607 // Collector policy
608 ConcurrentMarkSweepPolicy* _collector_policy;
609 ConcurrentMarkSweepPolicy* collector_policy() { return _collector_policy; }
610
611 void set_did_compact(bool v);
612
613 // XXX Move these to CMSStats ??? FIX ME !!!
614 elapsedTimer _inter_sweep_timer; // Time between sweeps
615 elapsedTimer _intra_sweep_timer; // Time _in_ sweeps
616 // Padded decaying average estimates of the above
617 AdaptivePaddedAverage _inter_sweep_estimate;
618 AdaptivePaddedAverage _intra_sweep_estimate;
619
620 CMSTracer* _gc_tracer_cm;
621 ConcurrentGCTimer* _gc_timer_cm;
622
623 bool _cms_start_registered;
624
625 GCHeapSummary _last_heap_summary;
626 MetaspaceSummary _last_metaspace_summary;
627
628 void register_foreground_gc_start(GCCause::Cause cause);
629 void register_gc_start(GCCause::Cause cause);
630 void register_gc_end();
631 void save_heap_summary();
632 void report_heap_summary(GCWhen::Type when);
633
634 protected:
635 ConcurrentMarkSweepGeneration* _cmsGen; // Old gen (CMS)
636 MemRegion _span; // Span covering above two
637 CardTableRS* _ct; // Card table
638
639 // CMS marking support structures
640 CMSBitMap _markBitMap;
641 CMSBitMap _modUnionTable;
642 CMSMarkStack _markStack;
643
644 HeapWord* _restart_addr; // In support of marking stack overflow
645 void lower_restart_addr(HeapWord* low);
646
647 // Counters in support of marking stack / work queue overflow handling:
648 // a non-zero value indicates certain types of overflow events during
649 // the current CMS cycle and could lead to stack resizing efforts at
650 // an opportune future time.
651 size_t _ser_pmc_preclean_ovflw;
652 size_t _ser_pmc_remark_ovflw;
653 size_t _par_pmc_remark_ovflw;
654 size_t _ser_kac_preclean_ovflw;
655 size_t _ser_kac_ovflw;
656 size_t _par_kac_ovflw;
657 NOT_PRODUCT(ssize_t _num_par_pushes;)
658
659 // ("Weak") Reference processing support
660 ReferenceProcessor* _ref_processor;
661 CMSIsAliveClosure _is_alive_closure;
662 // keep this textually after _markBitMap and _span; c'tor dependency
663
664 ConcurrentMarkSweepThread* _cmsThread; // the thread doing the work
665 ModUnionClosure _modUnionClosure;
666 ModUnionClosurePar _modUnionClosurePar;
667
668 // CMS abstract state machine
669 // initial_state: Idling
670 // next_state(Idling) = {Marking}
671 // next_state(Marking) = {Precleaning, Sweeping}
672 // next_state(Precleaning) = {AbortablePreclean, FinalMarking}
673 // next_state(AbortablePreclean) = {FinalMarking}
674 // next_state(FinalMarking) = {Sweeping}
675 // next_state(Sweeping) = {Resizing}
676 // next_state(Resizing) = {Resetting}
677 // next_state(Resetting) = {Idling}
678 // The numeric values below are chosen so that:
679 // . _collectorState <= Idling == post-sweep && pre-mark
680 // . _collectorState in (Idling, Sweeping) == {initial,final}marking ||
681 // precleaning || abortablePreclean
682 public:
683 enum CollectorState {
684 Resizing = 0,
685 Resetting = 1,
686 Idling = 2,
687 InitialMarking = 3,
688 Marking = 4,
689 Precleaning = 5,
690 AbortablePreclean = 6,
691 FinalMarking = 7,
692 Sweeping = 8
693 };
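// Example of how the numeric ordering above is exploited (a sketch
// mirroring time_of_last_gc() further below): Resizing and Resetting
// compare below Idling, so a single comparison distinguishes the
// post-sweep/pre-mark states from an in-flight cycle:
//
//   if (_collectorState > Idling) {
//     // initial mark, marking, precleaning, remark or sweep in progress
//   }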
694 protected:
695 static CollectorState _collectorState;
696
697 // State related to prologue/epilogue invocation for my generations
698 bool _between_prologue_and_epilogue;
699
700 // Signaling/State related to coordination between fore- and background GC
701 // Note: When the baton has been passed from background GC to foreground GC,
702 // _foregroundGCIsActive is true and _foregroundGCShouldWait is false.
703 static bool _foregroundGCIsActive; // true iff foreground collector is active or
704 // wants to go active
705 static bool _foregroundGCShouldWait; // true iff background GC is active and has not
706 // yet passed the baton to the foreground GC
707
708 // Support for CMSScheduleRemark (abortable preclean)
709 bool _abort_preclean;
710 bool _start_sampling;
711
712 int _numYields;
713 size_t _numDirtyCards;
714 size_t _sweep_count;
715 // Number of full gc's since the last concurrent gc.
716 uint _full_gcs_since_conc_gc;
717
718 // Occupancy used for bootstrapping stats
719 double _bootstrap_occupancy;
720
721 // Timer
722 elapsedTimer _timer;
723
724 // Timing, allocation and promotion statistics, used for scheduling.
725 CMSStats _stats;
726
727 // Allocation limits installed in the young gen, used only in
728 // CMSIncrementalMode. When an allocation in the young gen would cross one of
729 // these limits, the cms generation is notified and the cms thread is started
730 // or stopped, respectively.
731 HeapWord* _icms_start_limit;
732 HeapWord* _icms_stop_limit;
733
734 enum CMS_op_type {
735 CMS_op_checkpointRootsInitial,
736 CMS_op_checkpointRootsFinal
737 };
738
739 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
740 bool stop_world_and_do(CMS_op_type op);
741
753 HeapWord** _end_addr; // ... End of Eden
754 Mutex* _eden_chunk_lock;
755 HeapWord** _eden_chunk_array; // ... Eden partitioning array
756 size_t _eden_chunk_index; // ... top (exclusive) of array
757 size_t _eden_chunk_capacity; // ... max entries in array
758
759 // Support for parallelizing survivor space rescan
760 HeapWord** _survivor_chunk_array;
761 size_t _survivor_chunk_index;
762 size_t _survivor_chunk_capacity;
763 size_t* _cursor;
764 ChunkArray* _survivor_plab_array;
765
766 // Support for marking stack overflow handling
767 bool take_from_overflow_list(size_t num, CMSMarkStack* to_stack);
768 bool par_take_from_overflow_list(size_t num,
769 OopTaskQueue* to_work_q,
770 int no_of_gc_threads);
771 void push_on_overflow_list(oop p);
772 void par_push_on_overflow_list(oop p);
773 // The following is, obviously, not, in general, "MT-stable"
774 bool overflow_list_is_empty() const;
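// Protocol sketch (illustrative; wq, chunk and n_threads are hypothetical
// locals): when a worker's queue fills up, the excess is parked on the
// shared _overflow_list and reclaimed once the queue has drained:
//
//   if (!wq->push(p)) {
//     par_push_on_overflow_list(p);   // CAS onto the shared list
//   }
//   // ... later, when wq is empty ...
//   if (par_take_from_overflow_list(chunk, wq, n_threads)) {
//     // wq has been refilled; resume draining
//   }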
775
776 void preserve_mark_if_necessary(oop p);
777 void par_preserve_mark_if_necessary(oop p);
778 void preserve_mark_work(oop p, markOop m);
779 void restore_preserved_marks_if_any();
780 NOT_PRODUCT(bool no_preserved_marks() const;)
781 // In support of testing overflow code
782 NOT_PRODUCT(int _overflow_counter;)
783 NOT_PRODUCT(bool simulate_overflow();) // Sequential
784 NOT_PRODUCT(bool par_simulate_overflow();) // MT version
785
786 // CMS work methods
787 void checkpointRootsInitialWork(bool asynch); // Initial checkpoint work
788
789 // A return value of false indicates failure due to stack overflow
790 bool markFromRootsWork(bool asynch); // Concurrent marking work
791
792 public: // FIX ME!!! only for testing
793 bool do_marking_st(bool asynch); // Single-threaded marking
794 bool do_marking_mt(bool asynch); // Multi-threaded marking
795
796 private:
797
798 // Concurrent precleaning work
799 size_t preclean_mod_union_table(ConcurrentMarkSweepGeneration* gen,
800 ScanMarkedObjectsAgainCarefullyClosure* cl);
801 size_t preclean_card_table(ConcurrentMarkSweepGeneration* gen,
802 ScanMarkedObjectsAgainCarefullyClosure* cl);
803 // Does precleaning work, returning a quantity indicative of
804 // the amount of "useful work" done.
805 size_t preclean_work(bool clean_refs, bool clean_survivors);
806 void preclean_klasses(MarkRefsIntoAndScanClosure* cl, Mutex* freelistLock);
807 void abortable_preclean(); // Preclean while looking for possible abort
808 void initialize_sequential_subtasks_for_young_gen_rescan(int i);
809 // Helper function for above; merge-sorts the per-thread plab samples
810 void merge_survivor_plab_arrays(ContiguousSpace* surv, int no_of_gc_threads);
811 // Resets (i.e. clears) the per-thread plab sample vectors
812 void reset_survivor_plab_arrays();
813
814 // Final (second) checkpoint work
815 void checkpointRootsFinalWork(bool asynch, bool clear_all_soft_refs,
816 bool init_mark_was_synchronous);
817 // Work routine for parallel version of remark
818 void do_remark_parallel();
819 // Work routine for non-parallel version of remark
820 void do_remark_non_parallel();
821 // Reference processing work routine (during second checkpoint)
822 void refProcessingWork(bool asynch, bool clear_all_soft_refs);
823
824 // Concurrent sweeping work
825 void sweepWork(ConcurrentMarkSweepGeneration* gen, bool asynch);
826
827 // (Concurrent) resetting of support data structures
828 void reset(bool asynch);
829
830 // Clear _expansion_cause fields of constituent generations
831 void clear_expansion_cause();
832
833 // An auxiliary method used to record the ends of
834 // used regions of each generation to limit the extent of sweep
835 void save_sweep_limits();
836
837 // A work method used by foreground collection to determine
838 // what type of collection (compacting or not, continuing or fresh)
839 // it should do.
840 void decide_foreground_collection_type(bool clear_all_soft_refs,
841 bool* should_compact, bool* should_start_over);
842
843 // A work method used by the foreground collector to do
844 // a mark-sweep-compact.
845 void do_compaction_work(bool clear_all_soft_refs);
846
847 // A work method used by the foreground collector to do
848 // a mark-sweep, after taking over from a possibly on-going
849 // concurrent mark-sweep collection.
850 void do_mark_sweep_work(bool clear_all_soft_refs,
851 CollectorState first_state, bool should_start_over);
852
853 // Work methods for reporting concurrent mode interruption or failure
854 bool is_external_interruption();
855 void report_concurrent_mode_interruption();
856
857 // If the background GC is active, acquire control from the background
858 // GC and do the collection.
859 void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
860
861 // For synchronizing passing of control from background to foreground
862 // GC. waitForForegroundGC() is called by the background
863 // collector. If it had to wait for a foreground collection,
864 // it returns true and the background collection should assume
865 // that the collection was finished by the foreground
866 // collector.
867 bool waitForForegroundGC();
868
869 // Incremental mode triggering: recompute the icms duty cycle and set the
870 // allocation limits in the young gen.
871 void icms_update_allocation_limits();
872
873 size_t block_size_using_printezis_bits(HeapWord* addr) const;
874 size_t block_size_if_printezis_bits(HeapWord* addr) const;
875 HeapWord* next_card_start_after_block(HeapWord* addr) const;
876
877 void setup_cms_unloading_and_verification_state();
878 public:
879 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
880 CardTableRS* ct,
881 ConcurrentMarkSweepPolicy* cp);
882 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
883
884 ReferenceProcessor* ref_processor() { return _ref_processor; }
885 void ref_processor_init();
886
887 Mutex* bitMapLock() const { return _markBitMap.lock(); }
888 static CollectorState abstract_state() { return _collectorState; }
889
890 bool should_abort_preclean() const; // Whether preclean should be aborted.
891 size_t get_eden_used() const;
892 size_t get_eden_capacity() const;
893
894 ConcurrentMarkSweepGeneration* cmsGen() { return _cmsGen; }
895
896 // Locking checks
897 NOT_PRODUCT(static bool have_cms_token();)
898
899 // XXXPERM bool should_collect(bool full, size_t size, bool tlab);
900 bool shouldConcurrentCollect();
901
902 void collect(bool full,
903 bool clear_all_soft_refs,
904 size_t size,
905 bool tlab);
906 void collect_in_background(bool clear_all_soft_refs, GCCause::Cause cause);
907 void collect_in_foreground(bool clear_all_soft_refs, GCCause::Cause cause);
908
909 // In support of ExplicitGCInvokesConcurrent
910 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
911 // Should we unload classes in a particular concurrent cycle?
912 bool should_unload_classes() const {
913 return _should_unload_classes;
914 }
915 void update_should_unload_classes();
916
941 void gc_prologue(bool full);
942 void gc_epilogue(bool full);
943
944 jlong time_of_last_gc(jlong now) {
945 if (_collectorState <= Idling) {
946 // gc not in progress
947 return _time_of_last_gc;
948 } else {
949 // collection in progress
950 return now;
951 }
952 }
953
954 // Support for parallel remark of survivor space
955 void* get_data_recorder(int thr_num);
956 void sample_eden_chunk();
957
958 CMSBitMap* markBitMap() { return &_markBitMap; }
959 void directAllocated(HeapWord* start, size_t size);
960
961 // Main CMS steps and related support
962 void checkpointRootsInitial(bool asynch);
963 bool markFromRoots(bool asynch); // a return value of false indicates failure
964 // due to stack overflow
965 void preclean();
966 void checkpointRootsFinal(bool asynch, bool clear_all_soft_refs,
967 bool init_mark_was_synchronous);
968 void sweep(bool asynch);
969
970 // Check that the currently executing thread is the expected
971 // one (foreground collector or background collector).
972 static void check_correct_thread_executing() PRODUCT_RETURN;
973 // XXXPERM void print_statistics() PRODUCT_RETURN;
974
975 bool is_cms_reachable(HeapWord* addr);
976
977 // Performance Counter Support
978 CollectorCounters* counters() { return _gc_counters; }
979
980 // Timer stuff
981 void startTimer() { assert(!_timer.is_active(), "Error"); _timer.start(); }
982 void stopTimer() { assert( _timer.is_active(), "Error"); _timer.stop(); }
983 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
984 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
985
986 int yields() { return _numYields; }
987 void resetYields() { _numYields = 0; }
988 void incrementYields() { _numYields++; }
989 void resetNumDirtyCards() { _numDirtyCards = 0; }
990 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
991 size_t numDirtyCards() { return _numDirtyCards; }
992
993 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
994 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
995 static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
996 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
997 size_t sweep_count() const { return _sweep_count; }
998 void increment_sweep_count() { _sweep_count++; }
999
1000 // Timers/stats for gc scheduling and incremental mode pacing.
1001 CMSStats& stats() { return _stats; }
1002
1003 // Convenience methods that check whether CMSIncrementalMode is enabled and
1004 // forward to the corresponding methods in ConcurrentMarkSweepThread.
1005 static void start_icms();
1006 static void stop_icms(); // Called at the end of the cms cycle.
1007 static void disable_icms(); // Called before a foreground collection.
1008 static void enable_icms(); // Called after a foreground collection.
1009 void icms_wait(); // Called at yield points.
1010
1011 // Adaptive size policy
1012 CMSAdaptiveSizePolicy* size_policy();
1013 CMSGCAdaptivePolicyCounters* gc_adaptive_policy_counters();
1014
1015 static void print_on_error(outputStream* st);
1016
1017 // Debugging
1018 void verify();
1019 bool verify_after_remark(bool silent = VerifySilently);
1020 void verify_ok_to_terminate() const PRODUCT_RETURN;
1021 void verify_work_stacks_empty() const PRODUCT_RETURN;
1022 void verify_overflow_empty() const PRODUCT_RETURN;
1023
1024 // Convenience methods in support of debugging
1025 static const size_t skip_header_HeapWords() PRODUCT_RETURN0;
1026 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
1027
1028 // Accessors
1029 CMSMarkStack* verification_mark_stack() { return &_markStack; }
1030 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
1031
1032 // Initialization errors
1033 bool completed_initialization() { return _completed_initialization; }
1034
1035 void print_eden_and_survivor_chunk_arrays();
1036 };
1037
1038 class CMSExpansionCause : public AllStatic {
1039 public:
1040 enum Cause {
1041 _no_expansion,
1042 _satisfy_free_ratio,
1043 _satisfy_promotion,
1044 _satisfy_allocation,
1045 _allocate_par_lab,
1046 _allocate_par_spooling_space,
1047 _adaptive_size_policy
1048 };
1092
1093 private:
1094 // For parallel young-gen GC support.
1095 CMSParGCThreadState** _par_gc_thread_states;
1096
1097 // Reason generation was expanded
1098 CMSExpansionCause::Cause _expansion_cause;
1099
1100 // In support of MinChunkSize being larger than min object size
1101 const double _dilatation_factor;
1102
1103 enum CollectionTypes {
1104 Concurrent_collection_type = 0,
1105 MS_foreground_collection_type = 1,
1106 MSC_foreground_collection_type = 2,
1107 Unknown_collection_type = 3
1108 };
1109
1110 CollectionTypes _debug_collection_type;
1111
1112 // True if a compacting collection was done.
1113 bool _did_compact;
1114 bool did_compact() { return _did_compact; }
1115
1116 // Fraction of current occupancy at which to start a CMS collection which
1117 // will collect this generation (at least).
1118 double _initiating_occupancy;
1119
1120 protected:
1121 // Shrink generation by specified size (returns false if unable to shrink)
1122 void shrink_free_list_by(size_t bytes);
1123
1124 // Update statistics for GC
1125 virtual void update_gc_stats(int level, bool full);
1126
1127 // Maximum available space in the generation (including uncommitted)
1128 // space.
1129 size_t max_available() const;
1130
1131 // getter and initializer for _initiating_occupancy field.
1132 double initiating_occupancy() const { return _initiating_occupancy; }
1186 size_t contiguous_available() const;
1187 size_t unsafe_max_alloc_nogc() const;
1188
1189 // over-rides
1190 MemRegion used_region() const;
1191 MemRegion used_region_at_save_marks() const;
1192
1193 // Does a "full" (forced) collection invoked on this generation collect
1194 // all younger generations as well? Note that the second conjunct is a
1195 // hack to allow the collection of the younger gen first if the flag is
1196 // set. This is better than using the policy's should_collect_gen0_first()
1197 // since that causes us to do an extra unnecessary pair of restart-&-stop-world.
1198 virtual bool full_collects_younger_generations() const {
1199 return UseCMSCompactAtFullCollection && !CollectGen0First;
1200 }
1201
1202 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1203
1204 // Support for compaction
1205 CompactibleSpace* first_compaction_space() const;
1206 // Adjust quantities in the generation affected by
1207 // the compaction.
1208 void reset_after_compaction();
1209
1210 // Allocation support
1211 HeapWord* allocate(size_t size, bool tlab);
1212 HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1213 oop promote(oop obj, size_t obj_size);
1214 HeapWord* par_allocate(size_t size, bool tlab) {
1215 return allocate(size, tlab);
1216 }
1217
1218 // Incremental mode triggering.
1219 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
1220 size_t word_size);
1221
1222 // Used by CMSStats to track direct allocation. The value is sampled and
1223 // reset after each young gen collection.
1224 size_t direct_allocated_words() const { return _direct_allocated_words; }
1225 void reset_direct_allocated_words() { _direct_allocated_words = 0; }
1226
1284 // Iteration support specific to CMS generations
1285 void save_sweep_limit();
1286
1287 // More iteration support
1288 virtual void oop_iterate(MemRegion mr, ExtendedOopClosure* cl);
1289 virtual void oop_iterate(ExtendedOopClosure* cl);
1290 virtual void safe_object_iterate(ObjectClosure* cl);
1291 virtual void object_iterate(ObjectClosure* cl);
1292
1293 // Need to declare the full complement of closures, whether we'll
1294 // override them or not, or get message from the compiler:
1295 // oop_since_save_marks_iterate_nv hides virtual function...
1296 #define CMS_SINCE_SAVE_MARKS_DECL(OopClosureType, nv_suffix) \
1297 void oop_since_save_marks_iterate##nv_suffix(OopClosureType* cl);
1298 ALL_SINCE_SAVE_MARKS_CLOSURES(CMS_SINCE_SAVE_MARKS_DECL)
1299
1300 // Smart allocation XXX -- move to CFLSpace?
1301 void setNearLargestChunk();
1302 bool isNearLargestChunk(HeapWord* addr);
1303
1304 // Get the chunk at the end of the space. Delegates to
1305 // the space.
1306 FreeChunk* find_chunk_at_end();
1307
1308 void post_compact();
1309
1310 // Debugging
1311 void prepare_for_verify();
1312 void verify();
1313 void print_statistics() PRODUCT_RETURN;
1314
1315 // Performance Counters support
1316 virtual void update_counters();
1317 virtual void update_counters(size_t used);
1318 void initialize_performance_counters();
1319 CollectorCounters* counters() { return collector()->counters(); }
1320
1321 // Support for parallel remark of survivor space
1322 void* get_data_recorder(int thr_num) {
1323 //Delegate to collector
1324 return collector()->get_data_recorder(thr_num);
1405 DEBUG_ONLY(bool _verifying;)
1406
1407 public:
1408 MarkFromRootsClosure(CMSCollector* collector, MemRegion span,
1409 CMSBitMap* bitMap,
1410 CMSMarkStack* markStack,
1411 bool should_yield, bool verifying = false);
1412 bool do_bit(size_t offset);
1413 void reset(HeapWord* addr);
1414 inline void do_yield_check();
1415
1416 private:
1417 void scanOopsInOop(HeapWord* ptr);
1418 void do_yield_work();
1419 };
1420
1421 // This closure is used to do concurrent multi-threaded
1422 // marking from the roots following the first checkpoint.
1423 // XXX This should really be a subclass of the serial version
1424 // above, but I have not had the time to refactor things cleanly.
1426 class Par_MarkFromRootsClosure: public BitMapClosure {
1427 CMSCollector* _collector;
1428 MemRegion _whole_span;
1429 MemRegion _span;
1430 CMSBitMap* _bit_map;
1431 CMSBitMap* _mut;
1432 OopTaskQueue* _work_queue;
1433 CMSMarkStack* _overflow_stack;
1434 bool _yield;
1435 int _skip_bits;
1436 HeapWord* _finger;
1437 HeapWord* _threshold;
1438 CMSConcMarkingTask* _task;
1439 public:
1440 Par_MarkFromRootsClosure(CMSConcMarkingTask* task, CMSCollector* collector,
1441 MemRegion span,
1442 CMSBitMap* bit_map,
1443 OopTaskQueue* work_queue,
1444 CMSMarkStack* overflow_stack,
1445 bool should_yield);
1763 // When _inFreeRange is set, this
1764 // indicates the accumulated size
1765 // of the "left hand chunk"
1766 NOT_PRODUCT(
1767 size_t _numObjectsFreed;
1768 size_t _numWordsFreed;
1769 size_t _numObjectsLive;
1770 size_t _numWordsLive;
1771 size_t _numObjectsAlreadyFree;
1772 size_t _numWordsAlreadyFree;
1773 FreeChunk* _last_fc;
1774 )
1775 private:
1776 // Code that is common to a free chunk or garbage when
1777 // encountered during sweeping.
1778 void do_post_free_or_garbage_chunk(FreeChunk *fc, size_t chunkSize);
1779 // Process a free chunk during sweeping.
1780 void do_already_free_chunk(FreeChunk *fc);
1781 // Work method called when processing an already free or a
1782 // freshly garbage chunk to do a lookahead and possibly a
1783 // preemptive flush if crossing over _limit.
1784 void lookahead_and_flush(FreeChunk* fc, size_t chunkSize);
1785 // Process a garbage chunk during sweeping.
1786 size_t do_garbage_chunk(FreeChunk *fc);
1787 // Process a live chunk during sweeping.
1788 size_t do_live_chunk(FreeChunk* fc);
1789
1790 // Accessors.
1791 HeapWord* freeFinger() const { return _freeFinger; }
1792 void set_freeFinger(HeapWord* v) { _freeFinger = v; }
1793 bool inFreeRange() const { return _inFreeRange; }
1794 void set_inFreeRange(bool v) { _inFreeRange = v; }
1795 bool lastFreeRangeCoalesced() const { return _lastFreeRangeCoalesced; }
1796 void set_lastFreeRangeCoalesced(bool v) { _lastFreeRangeCoalesced = v; }
1797 bool freeRangeInFreeLists() const { return _freeRangeInFreeLists; }
1798 void set_freeRangeInFreeLists(bool v) { _freeRangeInFreeLists = v; }
1799
1800 // Initialize a free range.
1801 void initialize_free_range(HeapWord* freeFinger, bool freeRangeInFreeLists);
1802 // Return this chunk to the free lists.
1803 void flush_cur_free_chunk(HeapWord* chunk, size_t size);
1862 OopTaskQueue* _work_queue;
1863 CMSBitMap* _bit_map;
1864 CMSInnerParMarkAndPushClosure _mark_and_push;
1865
1866 public:
1867 CMSParDrainMarkingStackClosure(CMSCollector* collector,
1868 MemRegion span, CMSBitMap* bit_map,
1869 OopTaskQueue* work_queue):
1870 _collector(collector),
1871 _span(span),
1872 _bit_map(bit_map),
1873 _work_queue(work_queue),
1874 _mark_and_push(collector, span, bit_map, work_queue) { }
1875
1876 public:
1877 void trim_queue(uint max);
1878 void do_void();
1879 };
1880
1881 // Allow yielding or short-circuiting of reference list
1882 // precleaning work.
1883 class CMSPrecleanRefsYieldClosure: public YieldClosure {
1884 CMSCollector* _collector;
1885 void do_yield_work();
1886 public:
1887 CMSPrecleanRefsYieldClosure(CMSCollector* collector):
1888 _collector(collector) {}
1889 virtual bool should_return();
1890 };
1891
1892
1893 // Convenience class that locks free list locks for given CMS collector
1894 class FreelistLocker: public StackObj {
1895 private:
1896 CMSCollector* _collector;
1897 public:
1898 FreelistLocker(CMSCollector* collector):
1899 _collector(collector) {
1900 _collector->getFreelistLocks();
1901 }
1902