339 //
340 // The durations measure: end_time[n] - start_time[n]
341 // The periods measure: start_time[n] - start_time[n-1]
342 //
343 // The cms period and duration include only concurrent collections; time spent
344 // in foreground cms collections due to System.gc() or because of a failure to
345 // keep up is not included.
346 //
347 // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
348 // real value, but is used only after the first period. A value of 100 is
349 // used for the first sample so it gets the entire weight.
350 unsigned int _saved_alpha; // 0-100
351 unsigned int _gc0_alpha; // current alpha for gc0 stats (0-100)
352 unsigned int _cms_alpha; // current alpha for cms stats (0-100)
353
354 double _gc0_duration;
355 double _gc0_period;
356 size_t _gc0_promoted; // bytes promoted per gc0
357 double _cms_duration;
358 double _cms_duration_pre_sweep; // time from initiation to start of sweep
359 double _cms_duration_per_mb;
360 double _cms_period;
361 size_t _cms_allocated; // bytes of direct allocation per gc0 period
362
363 // Timers.
364 elapsedTimer _cms_timer;
365 TimeStamp _gc0_begin_time;
366 TimeStamp _cms_begin_time;
367 TimeStamp _cms_end_time;
368
369 // Snapshots of the amount used in the CMS generation.
370 size_t _cms_used_at_gc0_begin;
371 size_t _cms_used_at_gc0_end;
372 size_t _cms_used_at_cms_begin;
373
374 // Used to prevent the duty cycle from being reduced in the middle of a cms
375 // cycle.
376 bool _allow_duty_cycle_reduction;
377
378 enum { // validity flags recorded in _valid_bits
379 _GC0_VALID = 0x1,
380 _CMS_VALID = 0x2,
381 _ALL_VALID = _GC0_VALID | _CMS_VALID
382 };
383
384 unsigned int _valid_bits; // or-ed _GC0_VALID/_CMS_VALID; see valid()
385
386 unsigned int _icms_duty_cycle; // icms duty cycle (0-100).
387
388 protected:
389
390 // Return a duty cycle that avoids wild oscillations, by limiting the amount
391 // of change between old_duty_cycle and new_duty_cycle (the latter is treated
392 // as a recommended value).
393 static unsigned int icms_damped_duty_cycle(unsigned int old_duty_cycle,
394 unsigned int new_duty_cycle);
395 unsigned int icms_update_duty_cycle_impl();
396
397 // In support of adjusting of cms trigger ratios based on history
398 // of concurrent mode failure.
399 double cms_free_adjustment_factor(size_t free) const;
400 void adjust_cms_free_adjustment_factor(bool fail, size_t free);
401
402 public:
403 CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
404 unsigned int alpha = CMSExpAvgFactor);
405
406 // Whether or not the statistics contain valid data; higher level statistics
407 // cannot be called until this returns true (they require at least one young
408 // gen and one cms cycle to have completed).
409 bool valid() const;
410
411 // Record statistics.
412 void record_gc0_begin();
413 void record_gc0_end(size_t cms_gen_bytes_used);
414 void record_cms_begin();
415 void record_cms_end();
416
417 // Allow management of the cms timer, which must be stopped/started around
418 // yield points.
419 elapsedTimer& cms_timer() { return _cms_timer; }
420 void start_cms_timer() { _cms_timer.start(); }
421 void stop_cms_timer() { _cms_timer.stop(); }
422
423 // Basic statistics; units are seconds or bytes.
424 double gc0_period() const { return _gc0_period; }
425 double gc0_duration() const { return _gc0_duration; }
426 size_t gc0_promoted() const { return _gc0_promoted; }
427 double cms_period() const { return _cms_period; }
428 double cms_duration() const { return _cms_duration; }
429 double cms_duration_per_mb() const { return _cms_duration_per_mb; }
430 size_t cms_allocated() const { return _cms_allocated; }
431
432 size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
433
434 // Seconds since the last background cms cycle began or ended.
435 double cms_time_since_begin() const;
436 double cms_time_since_end() const;
437
438 // Higher level statistics--caller must check that valid() returns true before
439 // calling.
440
441 // Returns bytes promoted per second of wall clock time.
442 double promotion_rate() const;
443
444 // Returns bytes directly allocated per second of wall clock time.
445 double cms_allocation_rate() const;
446
447 // Rate at which space in the cms generation is being consumed (sum of the
448 // above two).
449 double cms_consumption_rate() const;
450
451 // Returns an estimate of the number of seconds until the cms generation will
452 // fill up, assuming no collection work is done.
453 double time_until_cms_gen_full() const;
454
455 // Returns an estimate of the number of seconds remaining until
456 // the cms generation collection should start.
457 double time_until_cms_start() const;
458
459 // End of higher level statistics.
460
461 // Returns the cms incremental mode duty cycle, as a percentage (0-100).
462 unsigned int icms_duty_cycle() const { return _icms_duty_cycle; }
463
464 // Update the duty cycle and return the new value.
465 unsigned int icms_update_duty_cycle();
466
467 // Debugging.
468 void print_on(outputStream* st) const PRODUCT_RETURN;
469 void print() const { print_on(gclog_or_tty); }
470 };
471
472 // A closure related to weak references processing which
473 // we embed in the CMSCollector, since we need to pass
474 // it to the reference processor for secondary filtering
475 // of references based on reachability of referent;
476 // see role of _is_alive_non_header closure in the
477 // ReferenceProcessor class.
478 // For objects in the CMS generation, this closure checks
479 // if the object is "live" (reachable). Used in weak
480 // reference processing.
481 class CMSIsAliveClosure: public BoolObjectClosure {
482 const MemRegion _span; // region within which liveness is answered
483 const CMSBitMap* _bit_map; // presumably the marking bits consulted for liveness -- confirm in do_object_b
484
485 friend class CMSCollector;
486 public:
708
709 // Support for CMSScheduleRemark (abortable preclean)
710 bool _abort_preclean;
711 bool _start_sampling;
712
713 int _numYields; // yields during concurrent phases; see incrementYields()
714 size_t _numDirtyCards;
715 size_t _sweep_count;
716 // Number of full gc's since the last concurrent gc.
717 uint _full_gcs_since_conc_gc;
718
719 // Occupancy used for bootstrapping stats
720 double _bootstrap_occupancy;
721
722 // Timer
723 elapsedTimer _timer;
724
725 // Timing, allocation and promotion statistics, used for scheduling.
726 CMSStats _stats;
727
728 // Allocation limits installed in the young gen, used only in
729 // CMSIncrementalMode. When an allocation in the young gen would cross one of
730 // these limits, the cms generation is notified and the cms thread is started
731 // or stopped, respectively.
732 HeapWord* _icms_start_limit;
733 HeapWord* _icms_stop_limit;
734
735 enum CMS_op_type { // stop-the-world operations executed via do_CMS_operation()
736 CMS_op_checkpointRootsInitial,
737 CMS_op_checkpointRootsFinal
738 };
739
740 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
741 bool stop_world_and_do(CMS_op_type op);
742
743 OopTaskQueueSet* task_queues() { return _task_queues; }
744 int* hash_seed(int i) { return &_hash_seed[i]; }
745 YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
746
747 // Support for parallelizing Eden rescan in CMS remark phase
748 void sample_eden(); // ... sample Eden space top
749
750 private:
751 // Support for parallelizing young gen rescan in CMS remark phase
752 Generation* _young_gen; // the younger gen
753 HeapWord** _top_addr; // ... Top of Eden
754 HeapWord** _end_addr; // ... End of Eden
850 // concurrent mark-sweep collection.
851 void do_mark_sweep_work(bool clear_all_soft_refs,
852 CollectorState first_state, bool should_start_over);
853
854 // Work methods for reporting concurrent mode interruption or failure
855 bool is_external_interruption();
856 void report_concurrent_mode_interruption();
857
858 // If the background GC is active, acquire control from the background
859 // GC and do the collection.
860 void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
861
862 // For synchronizing passing of control from background to foreground
863 // GC. waitForForegroundGC() is called by the background
864 // collector. If it had to wait for a foreground collection,
865 // it returns true and the background collection should assume
866 // that the collection was finished by the foreground
867 // collector.
868 bool waitForForegroundGC();
869
870 // Incremental mode triggering: recompute the icms duty cycle and set the
871 // allocation limits in the young gen.
872 void icms_update_allocation_limits();
873
874 size_t block_size_using_printezis_bits(HeapWord* addr) const;
875 size_t block_size_if_printezis_bits(HeapWord* addr) const;
876 HeapWord* next_card_start_after_block(HeapWord* addr) const;
877
878 void setup_cms_unloading_and_verification_state();
879 public:
880 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
881 CardTableRS* ct,
882 ConcurrentMarkSweepPolicy* cp);
883 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
884
885 ReferenceProcessor* ref_processor() { return _ref_processor; }
886 void ref_processor_init();
887
888 Mutex* bitMapLock() const { return _markBitMap.lock(); }
889 static CollectorState abstract_state() { return _collectorState; }
890
891 bool should_abort_preclean() const; // Whether preclean should be aborted.
892 size_t get_eden_used() const;
893 size_t get_eden_capacity() const;
911 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
912 // Should we unload classes in a particular concurrent cycle?
913 bool should_unload_classes() const {
914 return _should_unload_classes;
915 }
916 void update_should_unload_classes();
917
918 void direct_allocated(HeapWord* start, size_t size);
919
920 // Object is dead if not marked and current phase is sweeping.
921 bool is_dead_obj(oop obj) const;
922
923 // After a promotion (of "start"), do any necessary marking.
924 // If "par", then it's being done by a parallel GC thread.
925 // The last two args indicate if we need precise marking
926 // and if so the size of the object so it can be dirtied
927 // in its entirety.
928 void promoted(bool par, HeapWord* start,
929 bool is_obj_array, size_t obj_size);
930
931 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
932 size_t word_size);
933
934 void getFreelistLocks() const;
935 void releaseFreelistLocks() const;
936 bool haveFreelistLocks() const;
937
938 // Adjust size of underlying generation
939 void compute_new_size();
940
941 // GC prologue and epilogue
942 void gc_prologue(bool full);
943 void gc_epilogue(bool full);
944
945 jlong time_of_last_gc(jlong now) {
946   // A collection that is still in progress is treated as ending "now";
947   // once the collector is idle, report the timestamp recorded at the
948   // last completed gc.
949   const bool collection_in_progress = _collectorState > Idling;
950   return collection_in_progress ? now : _time_of_last_gc;
951 }
984 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
985 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
986
987 int yields() { return _numYields; }
988 void resetYields() { _numYields = 0; }
989 void incrementYields() { _numYields++; }
990 void resetNumDirtyCards() { _numDirtyCards = 0; }
991 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
992 size_t numDirtyCards() { return _numDirtyCards; }
993
994 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
995 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
996 static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
997 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
998 size_t sweep_count() const { return _sweep_count; }
999 void increment_sweep_count() { _sweep_count++; }
1000
1001 // Timers/stats for gc scheduling and incremental mode pacing.
1002 CMSStats& stats() { return _stats; }
1003
1004 // Convenience methods that check whether CMSIncrementalMode is enabled and
1005 // forward to the corresponding methods in ConcurrentMarkSweepThread.
1006 static void start_icms();
1007 static void stop_icms(); // Called at the end of the cms cycle.
1008 static void disable_icms(); // Called before a foreground collection.
1009 static void enable_icms(); // Called after a foreground collection.
1010 void icms_wait(); // Called at yield points.
1011
1012 // Adaptive size policy
1013 AdaptiveSizePolicy* size_policy();
1014
1015 static void print_on_error(outputStream* st);
1016
1017 // Debugging
1018 void verify();
1019 bool verify_after_remark(bool silent = VerifySilently);
1020 void verify_ok_to_terminate() const PRODUCT_RETURN;
1021 void verify_work_stacks_empty() const PRODUCT_RETURN;
1022 void verify_overflow_empty() const PRODUCT_RETURN;
1023
1024 // Convenience methods in support of debugging
1025 static const size_t skip_header_HeapWords() PRODUCT_RETURN0; // NOTE(review): top-level const on a by-value return has no effect
1026 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
1027
1028 // Accessors
1029 CMSMarkStack* verification_mark_stack() { return &_markStack; }
1030 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
1031
1194 virtual bool full_collects_younger_generations() const {
1195 return UseCMSCompactAtFullCollection && !ScavengeBeforeFullGC;
1196 }
1197
1198 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1199
1200 // Support for compaction
1201 CompactibleSpace* first_compaction_space() const;
1202 // Adjust quantities in the generation affected by
1203 // the compaction.
1204 void reset_after_compaction();
1205
1206 // Allocation support
1207 HeapWord* allocate(size_t size, bool tlab);
1208 HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1209 oop promote(oop obj, size_t obj_size);
1210 HeapWord* par_allocate(size_t size, bool tlab) {
1211 return allocate(size, tlab);
1212 }
1213
1214 // Incremental mode triggering.
1215 HeapWord* allocation_limit_reached(Space* space, HeapWord* top,
1216 size_t word_size);
1217
1218 // Used by CMSStats to track direct allocation. The value is sampled and
1219 // reset after each young gen collection.
1220 size_t direct_allocated_words() const { return _direct_allocated_words; }
1221 void reset_direct_allocated_words() { _direct_allocated_words = 0; }
1222
1223 // Overrides for parallel promotion.
1224 virtual oop par_promote(int thread_num,
1225 oop obj, markOop m, size_t word_sz);
1226 // This one should not be called for CMS.
1227 virtual void par_promote_alloc_undo(int thread_num,
1228 HeapWord* obj, size_t word_sz);
1229 virtual void par_promote_alloc_done(int thread_num);
1230 virtual void par_oop_since_save_marks_iterate_done(int thread_num);
1231
1232 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
1233
1234 // Inform this (non-young) generation that a promotion failure was
1235 // encountered during a collection of a younger generation that
1236 // promotes into this generation.
|
339 //
340 // The durations measure: end_time[n] - start_time[n]
341 // The periods measure: start_time[n] - start_time[n-1]
342 //
343 // The cms period and duration include only concurrent collections; time spent
344 // in foreground cms collections due to System.gc() or because of a failure to
345 // keep up is not included.
346 //
347 // There are 3 alphas to "bootstrap" the statistics. The _saved_alpha is the
348 // real value, but is used only after the first period. A value of 100 is
349 // used for the first sample so it gets the entire weight.
350 unsigned int _saved_alpha; // 0-100
351 unsigned int _gc0_alpha; // current alpha for gc0 stats (0-100)
352 unsigned int _cms_alpha; // current alpha for cms stats (0-100)
353
354 double _gc0_duration;
355 double _gc0_period;
356 size_t _gc0_promoted; // bytes promoted per gc0
357 double _cms_duration;
358 double _cms_duration_pre_sweep; // time from initiation to start of sweep
359 double _cms_period;
360 size_t _cms_allocated; // bytes of direct allocation per gc0 period
361
362 // Timers.
363 elapsedTimer _cms_timer;
364 TimeStamp _gc0_begin_time;
365 TimeStamp _cms_begin_time;
366 TimeStamp _cms_end_time;
367
368 // Snapshots of the amount used in the CMS generation.
369 size_t _cms_used_at_gc0_begin;
370 size_t _cms_used_at_gc0_end;
371 size_t _cms_used_at_cms_begin;
372
373 // Used to prevent the duty cycle from being reduced in the middle of a cms
374 // cycle.
375 bool _allow_duty_cycle_reduction;
376
377 enum { // validity flags recorded in _valid_bits
378 _GC0_VALID = 0x1,
379 _CMS_VALID = 0x2,
380 _ALL_VALID = _GC0_VALID | _CMS_VALID
381 };
382
383 unsigned int _valid_bits; // or-ed _GC0_VALID/_CMS_VALID; see valid()
384
385 protected:
386 // In support of adjusting of cms trigger ratios based on history
387 // of concurrent mode failure.
388 double cms_free_adjustment_factor(size_t free) const;
389 void adjust_cms_free_adjustment_factor(bool fail, size_t free);
390
391 public:
392 CMSStats(ConcurrentMarkSweepGeneration* cms_gen,
393 unsigned int alpha = CMSExpAvgFactor);
394
395 // Whether or not the statistics contain valid data; higher level statistics
396 // cannot be called until this returns true (they require at least one young
397 // gen and one cms cycle to have completed).
398 bool valid() const;
399
400 // Record statistics.
401 void record_gc0_begin();
402 void record_gc0_end(size_t cms_gen_bytes_used);
403 void record_cms_begin();
404 void record_cms_end();
405
406 // Allow management of the cms timer, which must be stopped/started around
407 // yield points.
408 elapsedTimer& cms_timer() { return _cms_timer; }
409 void start_cms_timer() { _cms_timer.start(); }
410 void stop_cms_timer() { _cms_timer.stop(); }
411
412 // Basic statistics; units are seconds or bytes.
413 double gc0_period() const { return _gc0_period; }
414 double gc0_duration() const { return _gc0_duration; }
415 size_t gc0_promoted() const { return _gc0_promoted; }
416 double cms_period() const { return _cms_period; }
417 double cms_duration() const { return _cms_duration; }
418 size_t cms_allocated() const { return _cms_allocated; }
419
420 size_t cms_used_at_gc0_end() const { return _cms_used_at_gc0_end;}
421
422 // Seconds since the last background cms cycle began or ended.
423 double cms_time_since_begin() const;
424 double cms_time_since_end() const;
425
426 // Higher level statistics--caller must check that valid() returns true before
427 // calling.
428
429 // Returns bytes promoted per second of wall clock time.
430 double promotion_rate() const;
431
432 // Returns bytes directly allocated per second of wall clock time.
433 double cms_allocation_rate() const;
434
435 // Rate at which space in the cms generation is being consumed (sum of the
436 // above two).
437 double cms_consumption_rate() const;
438
439 // Returns an estimate of the number of seconds until the cms generation will
440 // fill up, assuming no collection work is done.
441 double time_until_cms_gen_full() const;
442
443 // Returns an estimate of the number of seconds remaining until
444 // the cms generation collection should start.
445 double time_until_cms_start() const;
446
447 // End of higher level statistics.
448
449 // Debugging.
450 void print_on(outputStream* st) const PRODUCT_RETURN;
451 void print() const { print_on(gclog_or_tty); }
452 };
453
454 // A closure related to weak references processing which
455 // we embed in the CMSCollector, since we need to pass
456 // it to the reference processor for secondary filtering
457 // of references based on reachability of referent;
458 // see role of _is_alive_non_header closure in the
459 // ReferenceProcessor class.
460 // For objects in the CMS generation, this closure checks
461 // if the object is "live" (reachable). Used in weak
462 // reference processing.
463 class CMSIsAliveClosure: public BoolObjectClosure {
464 const MemRegion _span; // region within which liveness is answered
465 const CMSBitMap* _bit_map; // presumably the marking bits consulted for liveness -- confirm in do_object_b
466
467 friend class CMSCollector;
468 public:
690
691 // Support for CMSScheduleRemark (abortable preclean)
692 bool _abort_preclean;
693 bool _start_sampling;
694
695 int _numYields; // yields during concurrent phases; see incrementYields()
696 size_t _numDirtyCards;
697 size_t _sweep_count;
698 // Number of full gc's since the last concurrent gc.
699 uint _full_gcs_since_conc_gc;
700
701 // Occupancy used for bootstrapping stats
702 double _bootstrap_occupancy;
703
704 // Timer
705 elapsedTimer _timer;
706
707 // Timing, allocation and promotion statistics, used for scheduling.
708 CMSStats _stats;
709
710 enum CMS_op_type { // stop-the-world operations executed via do_CMS_operation()
711 CMS_op_checkpointRootsInitial,
712 CMS_op_checkpointRootsFinal
713 };
714
715 void do_CMS_operation(CMS_op_type op, GCCause::Cause gc_cause);
716 bool stop_world_and_do(CMS_op_type op);
717
718 OopTaskQueueSet* task_queues() { return _task_queues; }
719 int* hash_seed(int i) { return &_hash_seed[i]; }
720 YieldingFlexibleWorkGang* conc_workers() { return _conc_workers; }
721
722 // Support for parallelizing Eden rescan in CMS remark phase
723 void sample_eden(); // ... sample Eden space top
724
725 private:
726 // Support for parallelizing young gen rescan in CMS remark phase
727 Generation* _young_gen; // the younger gen
728 HeapWord** _top_addr; // ... Top of Eden
729 HeapWord** _end_addr; // ... End of Eden
825 // concurrent mark-sweep collection.
826 void do_mark_sweep_work(bool clear_all_soft_refs,
827 CollectorState first_state, bool should_start_over);
828
829 // Work methods for reporting concurrent mode interruption or failure
830 bool is_external_interruption();
831 void report_concurrent_mode_interruption();
832
833 // If the background GC is active, acquire control from the background
834 // GC and do the collection.
835 void acquire_control_and_collect(bool full, bool clear_all_soft_refs);
836
837 // For synchronizing passing of control from background to foreground
838 // GC. waitForForegroundGC() is called by the background
839 // collector. If it had to wait for a foreground collection,
840 // it returns true and the background collection should assume
841 // that the collection was finished by the foreground
842 // collector.
843 bool waitForForegroundGC();
844
845 size_t block_size_using_printezis_bits(HeapWord* addr) const;
846 size_t block_size_if_printezis_bits(HeapWord* addr) const;
847 HeapWord* next_card_start_after_block(HeapWord* addr) const;
848
849 void setup_cms_unloading_and_verification_state();
850 public:
851 CMSCollector(ConcurrentMarkSweepGeneration* cmsGen,
852 CardTableRS* ct,
853 ConcurrentMarkSweepPolicy* cp);
854 ConcurrentMarkSweepThread* cmsThread() { return _cmsThread; }
855
856 ReferenceProcessor* ref_processor() { return _ref_processor; }
857 void ref_processor_init();
858
859 Mutex* bitMapLock() const { return _markBitMap.lock(); }
860 static CollectorState abstract_state() { return _collectorState; }
861
862 bool should_abort_preclean() const; // Whether preclean should be aborted.
863 size_t get_eden_used() const;
864 size_t get_eden_capacity() const;
882 static void request_full_gc(unsigned int full_gc_count, GCCause::Cause cause);
883 // Should we unload classes in a particular concurrent cycle?
884 bool should_unload_classes() const {
885 return _should_unload_classes;
886 }
887 void update_should_unload_classes();
888
889 void direct_allocated(HeapWord* start, size_t size);
890
891 // Object is dead if not marked and current phase is sweeping.
892 bool is_dead_obj(oop obj) const;
893
894 // After a promotion (of "start"), do any necessary marking.
895 // If "par", then it's being done by a parallel GC thread.
896 // The last two args indicate if we need precise marking
897 // and if so the size of the object so it can be dirtied
898 // in its entirety.
899 void promoted(bool par, HeapWord* start,
900 bool is_obj_array, size_t obj_size);
901
902 void getFreelistLocks() const;
903 void releaseFreelistLocks() const;
904 bool haveFreelistLocks() const;
905
906 // Adjust size of underlying generation
907 void compute_new_size();
908
909 // GC prologue and epilogue
910 void gc_prologue(bool full);
911 void gc_epilogue(bool full);
912
913 jlong time_of_last_gc(jlong now) {
914   // A collection currently in progress counts as ending "now".
915   if (_collectorState > Idling) {
916     return now;
917   }
918   // Collector idle: report the timestamp of the last completed gc.
919   return _time_of_last_gc;
920 }
952 void resetTimer() { assert(!_timer.is_active(), "Error"); _timer.reset(); }
953 double timerValue() { assert(!_timer.is_active(), "Error"); return _timer.seconds(); }
954
955 int yields() { return _numYields; }
956 void resetYields() { _numYields = 0; }
957 void incrementYields() { _numYields++; }
958 void resetNumDirtyCards() { _numDirtyCards = 0; }
959 void incrementNumDirtyCards(size_t num) { _numDirtyCards += num; }
960 size_t numDirtyCards() { return _numDirtyCards; }
961
962 static bool foregroundGCShouldWait() { return _foregroundGCShouldWait; }
963 static void set_foregroundGCShouldWait(bool v) { _foregroundGCShouldWait = v; }
964 static bool foregroundGCIsActive() { return _foregroundGCIsActive; }
965 static void set_foregroundGCIsActive(bool v) { _foregroundGCIsActive = v; }
966 size_t sweep_count() const { return _sweep_count; }
967 void increment_sweep_count() { _sweep_count++; }
968
969 // Timers/stats for gc scheduling and incremental mode pacing.
970 CMSStats& stats() { return _stats; }
971
972 // Adaptive size policy
973 AdaptiveSizePolicy* size_policy();
974
975 static void print_on_error(outputStream* st);
976
977 // Debugging
978 void verify();
979 bool verify_after_remark(bool silent = VerifySilently);
980 void verify_ok_to_terminate() const PRODUCT_RETURN;
981 void verify_work_stacks_empty() const PRODUCT_RETURN;
982 void verify_overflow_empty() const PRODUCT_RETURN;
983
984 // Convenience methods in support of debugging
985 static const size_t skip_header_HeapWords() PRODUCT_RETURN0; // NOTE(review): top-level const on a by-value return has no effect
986 HeapWord* block_start(const void* p) const PRODUCT_RETURN0;
987
988 // Accessors
989 CMSMarkStack* verification_mark_stack() { return &_markStack; }
990 CMSBitMap* verification_mark_bm() { return &_verification_mark_bm; }
991
1154 virtual bool full_collects_younger_generations() const {
1155 return UseCMSCompactAtFullCollection && !ScavengeBeforeFullGC;
1156 }
1157
1158 void space_iterate(SpaceClosure* blk, bool usedOnly = false);
1159
1160 // Support for compaction
1161 CompactibleSpace* first_compaction_space() const;
1162 // Adjust quantities in the generation affected by
1163 // the compaction.
1164 void reset_after_compaction();
1165
1166 // Allocation support
1167 HeapWord* allocate(size_t size, bool tlab);
1168 HeapWord* have_lock_and_allocate(size_t size, bool tlab);
1169 oop promote(oop obj, size_t obj_size);
1170 HeapWord* par_allocate(size_t size, bool tlab) {
1171 return allocate(size, tlab);
1172 }
1173
1174
1175 // Used by CMSStats to track direct allocation. The value is sampled and
1176 // reset after each young gen collection.
1177 size_t direct_allocated_words() const { return _direct_allocated_words; }
1178 void reset_direct_allocated_words() { _direct_allocated_words = 0; }
1179
1180 // Overrides for parallel promotion.
1181 virtual oop par_promote(int thread_num,
1182 oop obj, markOop m, size_t word_sz);
1183 // This one should not be called for CMS.
1184 virtual void par_promote_alloc_undo(int thread_num,
1185 HeapWord* obj, size_t word_sz);
1186 virtual void par_promote_alloc_done(int thread_num);
1187 virtual void par_oop_since_save_marks_iterate_done(int thread_num);
1188
1189 virtual bool promotion_attempt_is_safe(size_t promotion_in_bytes) const;
1190
1191 // Inform this (non-young) generation that a promotion failure was
1192 // encountered during a collection of a younger generation that
1193 // promotes into this generation.
|