57 _analytics(new G1Analytics(&_predictor)),
58 _remset_tracker(),
59 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
60 _ihop_control(create_ihop_control(&_predictor)),
61 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
62 _full_collection_start_sec(0.0),
63 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
64 _young_list_target_length(0),
65 _young_list_fixed_length(0),
66 _young_list_max_length(0),
67 _eden_surv_rate_group(new G1SurvRateGroup()),
68 _survivor_surv_rate_group(new G1SurvRateGroup()),
69 _reserve_factor((double) G1ReservePercent / 100.0),
70 _reserve_regions(0),
71 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
72 _free_regions_at_end_of_collection(0),
73 _rs_length(0),
74 _rs_length_prediction(0),
75 _pending_cards_at_gc_start(0),
76 _old_gen_alloc_tracker(),
77 _initial_mark_to_mixed(),
78 _collection_set(NULL),
79 _g1h(NULL),
80 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
81 _mark_remark_start_sec(0),
82 _mark_cleanup_start_sec(0),
83 _tenuring_threshold(MaxTenuringThreshold),
84 _max_survivor_regions(0),
85 _survivors_age_table(true)
86 {
87 }
88
G1Policy::~G1Policy() {
  // Free the heap-allocated collaborators this policy owns exclusively.
  // NOTE(review): other members allocated in the constructor (_analytics,
  // _mmu_tracker, _policy_counters, the survivor rate groups, _phase_times)
  // are not freed here — presumably G1Policy lives for the lifetime of the
  // VM; confirm ownership before adding deletes.
  delete _ihop_control;
  delete _young_gen_sizer;
}
93
94 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
95 if (G1Arguments::is_heterogeneous_heap()) {
96 return new G1HeterogeneousHeapPolicy(gc_timer_stw);
97 } else {
431 _collection_set->clear_candidates();
432 _pending_cards_at_gc_start = 0;
433 }
434
// Re-establishes policy state after a Full GC: feeds the Full GC duration
// into the pause-time statistics and resets the young/mixed heuristics so
// the next cycle starts out with young-only collections.
void G1Policy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_in_full_gc(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_in_young_only_phase(true);
  collector_state()->set_in_young_gc_before_mixed(false);
  // A Full GC may still leave occupancy above the marking threshold, so
  // re-evaluate whether a concurrent mark should be requested right away.
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_in_initial_mark_gc(false);
  collector_state()->set_mark_or_rebuild_in_progress(false);
  collector_state()->set_clearing_next_bitmap(false);

  _eden_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // Refresh sizing inputs now that compaction changed the free region count.
  _free_regions_at_end_of_collection = _g1h->num_free_regions();
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_length_prediction();

  _old_gen_alloc_tracker.reset_after_full_gc();

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}
467
468 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
469 log_debug(gc, refine, stats)
470 ("%s refinement: %.2fms, refined: " SIZE_FORMAT
471 ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
527 assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
528 "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
529 max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
530 assert_used_and_recalculate_used_equal(_g1h);
531
532 phase_times()->record_cur_collection_start_sec(start_time_sec);
533
534 record_concurrent_refinement_stats();
535
536 _collection_set->reset_bytes_used_before();
537
538 // do that for any other surv rate groups
539 _eden_surv_rate_group->stop_adding_regions();
540 _survivors_age_table.clear();
541
542 assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
543 }
544
// Finishes the initial-mark part of a pause: the initiation request must
// already have been consumed, so only the in-initial-mark flag is cleared.
// The elapsed-time argument is currently unused.
void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_in_initial_mark_gc(false);
}
549
// Stamps the start time of the Remark pause so that
// record_concurrent_mark_remark_end() can compute its duration.
void G1Policy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
}
553
554 void G1Policy::record_concurrent_mark_remark_end() {
555 double end_time_sec = os::elapsedTime();
556 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
557 _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
558 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
559
560 record_pause(Remark, _mark_remark_start_sec, end_time_sec);
561 }
562
// Stamps the start time of the Cleanup pause; the matching end-recording
// (not visible in this chunk) computes the duration from it.
void G1Policy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}
566
567 double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
615 double G1Policy::logged_cards_processing_time() const {
616 double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
617 size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
618 size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
619 phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
620 // This may happen if there are duplicate cards in different log buffers.
621 if (logged_dirty_cards > scan_heap_roots_cards) {
622 return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
623 }
624 return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
625 }
626
627 // Anything below that is considered to be zero
628 #define MIN_TIMER_GRANULARITY 0.0000001
629
630 void G1Policy::record_collection_pause_end(double pause_time_ms) {
631 G1GCPhaseTimes* p = phase_times();
632
633 double end_time_sec = os::elapsedTime();
634
635 bool this_pause_included_initial_mark = false;
636 bool this_pause_was_young_only = collector_state()->in_young_only_phase();
637
638 bool update_stats = !_g1h->evacuation_failed();
639
640 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
641
642 _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
643
644 this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
645 if (this_pause_included_initial_mark) {
646 record_concurrent_mark_init_end(0.0);
647 } else {
648 maybe_start_marking();
649 }
650
651 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
652 if (app_time_ms < MIN_TIMER_GRANULARITY) {
653 // This usually happens due to the timer not having the required
654 // granularity. Some Linuxes are the usual culprits.
655 // We'll just set it to something (arbitrarily) small.
656 app_time_ms = 1.0;
657 }
658
659 if (update_stats) {
660 // We maintain the invariant that all objects allocated by mutator
661 // threads will be allocated out of eden regions. So, we can use
662 // the eden region number allocated since the previous GC to
663 // calculate the application's allocate rate. The only exception
664 // to that is humongous objects that are allocated separately. But
665 // given that humongous object allocations do not really affect
666 // either the pause's duration nor when the next pause will take
667 // place we can safely ignore them here.
668 uint regions_allocated = _collection_set->eden_region_length();
669 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
670 _analytics->report_alloc_rate_ms(alloc_rate_ms);
671
672 _analytics->compute_pause_time_ratios(end_time_sec, pause_time_ms);
673 _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
674 }
675
676 if (collector_state()->in_young_gc_before_mixed()) {
677 assert(!this_pause_included_initial_mark, "The young GC before mixed is not allowed to be an initial mark GC");
678 // This has been the young GC before we start doing mixed GCs. We already
679 // decided to start mixed GCs much earlier, so there is nothing to do except
680 // advancing the state.
681 collector_state()->set_in_young_only_phase(false);
682 collector_state()->set_in_young_gc_before_mixed(false);
683 } else if (!this_pause_was_young_only) {
684 // This is a mixed GC. Here we decide whether to continue doing more
685 // mixed GCs or not.
686 if (!next_gc_should_be_mixed("continue mixed GCs",
687 "do not continue mixed GCs")) {
688 collector_state()->set_in_young_only_phase(true);
689
690 clear_collection_set_candidates();
691 maybe_start_marking();
692 }
693 }
694
695 _eden_surv_rate_group->start_adding_regions();
696
697 double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
698 if (update_stats) {
699 size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
700 p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
701 // Update prediction for card merge; MergeRSDirtyCards includes the cards from the Eager Reclaim phase.
702 size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
703 p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
704 total_log_buffer_cards;
705
706 // The threshold for the number of cards in a given sampling which we consider
707 // large enough so that the impact from setup and other costs is negligible.
708 size_t const CardsNumSamplingThreshold = 10;
709
710 if (total_cards_merged > CardsNumSamplingThreshold) {
711 double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
712 average_time_ms(G1GCPhaseTimes::MergeRS) +
713 average_time_ms(G1GCPhaseTimes::MergeHCC) +
714 average_time_ms(G1GCPhaseTimes::MergeLB) +
715 average_time_ms(G1GCPhaseTimes::OptMergeRS);
716 _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged, this_pause_was_young_only);
717 }
718
719 // Update prediction for card scan
720 size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
721 p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
722
723 if (total_cards_scanned > CardsNumSamplingThreshold) {
724 double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) +
725 average_time_ms(G1GCPhaseTimes::OptScanHR);
726
727 _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned, this_pause_was_young_only);
728 }
729
730 // Update prediction for the ratio between cards from the remembered
731 // sets and actually scanned cards from the remembered sets.
732 // Cards from the remembered sets are all cards not duplicated by cards from
733 // the logs.
734 // Due to duplicates in the log buffers, the number of actually scanned cards
735 // can be smaller than the cards in the log buffers.
736 const size_t from_rs_length_cards = (total_cards_scanned > total_log_buffer_cards) ? total_cards_scanned - total_log_buffer_cards : 0;
737 double merge_to_scan_ratio = 0.0;
738 if (total_cards_scanned > 0) {
739 merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned;
740 }
741 _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio, this_pause_was_young_only);
742
743 const size_t recorded_rs_length = _collection_set->recorded_rs_length();
744 const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0;
745 _analytics->report_rs_length_diff(rs_length_diff);
746
747 // Update prediction for copy cost per byte
748 size_t copied_bytes = p->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSCopiedBytes);
749
750 if (copied_bytes > 0) {
751 double cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / copied_bytes;
752 _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
753 }
754
755 if (_collection_set->young_region_length() > 0) {
756 _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
757 _collection_set->young_region_length());
758 }
759
760 if (_collection_set->old_region_length() > 0) {
761 _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
762 _collection_set->old_region_length());
763 }
764
765 _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
766
767 // Do not update RS lengths and the number of pending cards with information from mixed gc:
768 // these are is wildly different to during young only gc and mess up young gen sizing right
769 // after the mixed gc phase.
770 // During mixed gc we do not use them for young gen sizing.
771 if (this_pause_was_young_only) {
772 _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
773 _analytics->report_rs_length((double) _rs_length);
774 }
775 }
776
777 assert(!(this_pause_included_initial_mark && collector_state()->mark_or_rebuild_in_progress()),
778 "If the last pause has been an initial mark, we should not have been in the marking window");
779 if (this_pause_included_initial_mark) {
780 collector_state()->set_mark_or_rebuild_in_progress(true);
781 }
782
783 _free_regions_at_end_of_collection = _g1h->num_free_regions();
784
785 update_rs_length_prediction();
786
787 // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
788 // that in this case we are not running in a "normal" operating mode.
789 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
790 // IHOP control wants to know the expected young gen length if it were not
791 // restrained by the heap reserve. Using the actual length would make the
792 // prediction too small and the limit the young gen every time we get to the
793 // predicted target occupancy.
794 size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
795
796 _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
797 update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
798 _old_gen_alloc_tracker.last_cycle_old_bytes(),
799 last_unrestrained_young_length * HeapRegion::GrainBytes,
800 this_pause_was_young_only);
801
802 _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
803 } else {
804 // Any garbage collection triggered as periodic collection resets the time-to-mixed
805 // measurement. Periodic collection typically means that the application is "inactive", i.e.
806 // the marking threads may have received an uncharacterisic amount of cpu time
807 // for completing the marking, i.e. are faster than expected.
808 // This skews the predicted marking length towards smaller values which might cause
809 // the mark start being too late.
810 _initial_mark_to_mixed.reset();
811 }
812
813 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
814 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
815
816 if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
817 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
818 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
819 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
820
821 scan_logged_cards_time_goal_ms = 0;
822 } else {
823 scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
824 }
825
826 double const logged_cards_time = logged_cards_processing_time();
827
828 log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
829 scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
830
841 G1HeapWastePercent);
842 } else {
843 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
844 }
845 }
846
847 void G1Policy::update_ihop_prediction(double mutator_time_s,
848 size_t mutator_alloc_bytes,
849 size_t young_gen_size,
850 bool this_gc_was_young_only) {
851 // Always try to update IHOP prediction. Even evacuation failures give information
852 // about e.g. whether to start IHOP earlier next time.
853
854 // Avoid using really small application times that might create samples with
855 // very high or very low values. They may be caused by e.g. back-to-back gcs.
856 double const min_valid_time = 1e-6;
857
858 bool report = false;
859
860 double marking_to_mixed_time = -1.0;
861 if (!this_gc_was_young_only && _initial_mark_to_mixed.has_result()) {
862 marking_to_mixed_time = _initial_mark_to_mixed.last_marking_time();
863 assert(marking_to_mixed_time > 0.0,
864 "Initial mark to mixed time must be larger than zero but is %.3f",
865 marking_to_mixed_time);
866 if (marking_to_mixed_time > min_valid_time) {
867 _ihop_control->update_marking_length(marking_to_mixed_time);
868 report = true;
869 }
870 }
871
872 // As an approximation for the young gc promotion rates during marking we use
873 // all of them. In many applications there are only a few if any young gcs during
874 // marking, which makes any prediction useless. This increases the accuracy of the
875 // prediction.
876 if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
877 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
878 report = true;
879 }
880
881 if (report) {
882 report_ihop_statistics();
883 }
884 }
1000 double max_survivor_regions_d =
1001 (double) _young_list_target_length / (double) SurvivorRatio;
1002
1003 // Calculate desired survivor size based on desired max survivor regions (unconstrained
1004 // by remaining heap). Otherwise we may cause undesired promotions as we are
1005 // already getting close to end of the heap, impacting performance even more.
1006 uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
1007 size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);
1008
1009 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
1010 if (UsePerfData) {
1011 _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
1012 _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
1013 }
1014 // The real maximum survivor size is bounded by the number of regions that can
1015 // be allocated into.
1016 _max_survivor_regions = MIN2(desired_max_survivor_regions,
1017 _g1h->num_free_or_available_regions());
1018 }
1019
1020 bool G1Policy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
1021 // We actually check whether we are marking here and not if we are in a
1022 // reclamation phase. This means that we will schedule a concurrent mark
1023 // even while we are still in the process of reclaiming memory.
1024 bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
1025 if (!during_cycle) {
1026 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
1027 collector_state()->set_initiate_conc_mark_if_possible(true);
1028 return true;
1029 } else {
1030 log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
1031 return false;
1032 }
1033 }
1034
// Transitions the policy into an initial-mark pause: the upcoming collection
// also performs the concurrent-mark start work, and the pending initiation
// request is consumed.
void G1Policy::initiate_conc_mark() {
  collector_state()->set_in_initial_mark_gc(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}
1039
1040 void G1Policy::decide_on_conc_mark_initiation() {
1041 // We are about to decide on whether this pause will be an
1042 // initial-mark pause.
1043
1044 // First, collector_state()->in_initial_mark_gc() should not be already set. We
1045 // will set it here if we have to. However, it should be cleared by
1046 // the end of the pause (it's only set for the duration of an
1047 // initial-mark pause).
1048 assert(!collector_state()->in_initial_mark_gc(), "pre-condition");
1049
1050 if (collector_state()->initiate_conc_mark_if_possible()) {
1051 // We had noticed on a previous pause that the heap occupancy has
1052 // gone over the initiating threshold and we should start a
1053 // concurrent marking cycle. Or we've been explicitly requested
1054 // to start a concurrent marking cycle. Either way, we initiate
1055 // one if not inhibited for some reason.
1056
1057 GCCause::Cause cause = _g1h->gc_cause();
1058 if ((cause != GCCause::_wb_breakpoint) &&
1059 ConcurrentGCBreakpoints::is_controlled()) {
1060 log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)");
1061 } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
1062 // Initiate a new initial mark if there is no marking or reclamation going on.
1063 initiate_conc_mark();
1064 log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
1065 } else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
1066 (cause == GCCause::_wb_breakpoint)) {
1067 // Initiate a user requested initial mark or run_to a breakpoint.
1068 // An initial mark must be young only GC, so the collector state
1069 // must be updated to reflect this.
1070 collector_state()->set_in_young_only_phase(true);
1071 collector_state()->set_in_young_gc_before_mixed(false);
1072
1073 // We might have ended up coming here about to start a mixed phase with a collection set
1074 // active. The following remark might change the change the "evacuation efficiency" of
1075 // the regions in this set, leading to failing asserts later.
1076 // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
1077 clear_collection_set_candidates();
1078 abort_time_to_mixed_tracking();
1079 initiate_conc_mark();
1080 log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)",
1081 (cause == GCCause::_wb_breakpoint) ? "run_to breakpoint" : "user");
1082 } else {
1083 // The concurrent marking thread is still finishing up the
1084 // previous cycle. If we start one right now the two cycles
1085 // overlap. In particular, the concurrent marking thread might
1086 // be in the process of clearing the next marking bitmap (which
1087 // we will use for the next cycle if we start one). Starting a
1088 // cycle now will be bad given that parts of the marking
1128 }
1129 };
1130
1131 void G1Policy::clear_collection_set_candidates() {
1132 // Clear remembered sets of remaining candidate regions and the actual candidate
1133 // set.
1134 G1ClearCollectionSetCandidateRemSets cl;
1135 _collection_set->candidates()->iterate(&cl);
1136 _collection_set->clear_candidates();
1137 }
1138
1139 void G1Policy::maybe_start_marking() {
1140 if (need_to_start_conc_mark("end of GC")) {
1141 // Note: this might have already been set, if during the last
1142 // pause we decided to start a cycle but at the beginning of
1143 // this pause we decided to postpone it. That's OK.
1144 collector_state()->set_initiate_conc_mark_if_possible(true);
1145 }
1146 }
1147
1148 G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
1149 assert(!collector_state()->in_full_gc(), "must be");
1150 if (collector_state()->in_initial_mark_gc()) {
1151 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1152 return InitialMarkGC;
1153 } else if (collector_state()->in_young_gc_before_mixed()) {
1154 assert(!collector_state()->in_initial_mark_gc(), "must be");
1155 return LastYoungGC;
1156 } else if (collector_state()->in_mixed_phase()) {
1157 assert(!collector_state()->in_initial_mark_gc(), "must be");
1158 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1159 return MixedGC;
1160 } else {
1161 assert(!collector_state()->in_initial_mark_gc(), "must be");
1162 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1163 return YoungOnlyGC;
1164 }
1165 }
1166
// Records a pause of the given kind and [start, end) interval with the MMU
// tracker and updates the initial-mark-to-mixed mutator time tracking.
void G1Policy::record_pause(PauseKind kind, double start, double end) {
  // Manage the MMU tracker. For some reason it ignores Full GCs.
  if (kind != FullGC) {
    _mmu_tracker->add_pause(start, end);
  }
  // Manage the mutator time tracking from initial mark to first mixed gc.
  switch (kind) {
    case FullGC:
      // A Full GC invalidates the in-progress measurement entirely.
      abort_time_to_mixed_tracking();
      break;
    case Cleanup:
    case Remark:
    case YoungOnlyGC:
    case LastYoungGC:
      // Pauses between initial mark and the first mixed GC are reported to
      // the tracker — presumably so they are excluded from the mutator
      // interval; confirm against the tracker implementation.
      _initial_mark_to_mixed.add_pause(end - start);
      break;
    case InitialMarkGC:
      // Periodic collections do not start the measurement: they happen while
      // the application is "inactive" and would skew the marking-length
      // prediction.
      if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
        _initial_mark_to_mixed.record_initial_mark_end(end);
      }
      break;
    case MixedGC:
      _initial_mark_to_mixed.record_mixed_gc_start(start);
      break;
    default:
      ShouldNotReachHere();
  }
}
1195
// Discards any in-progress initial-mark-to-mixed measurement, e.g. when a
// Full GC or a user-requested concurrent cycle invalidates it (see callers
// record_pause() and decide_on_conc_mark_initiation()).
void G1Policy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}
1199
1200 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
1201 const char* false_action_str) const {
1202 G1CollectionSetCandidates* candidates = _collection_set->candidates();
1203
1204 if (candidates->is_empty()) {
1205 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1206 return false;
1207 }
1208
1209 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1210 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1211 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1212 double threshold = (double) G1HeapWastePercent;
1213 if (reclaimable_percent <= threshold) {
1214 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1215 false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1216 return false;
1217 }
|
57 _analytics(new G1Analytics(&_predictor)),
58 _remset_tracker(),
59 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
60 _ihop_control(create_ihop_control(&_predictor)),
61 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
62 _full_collection_start_sec(0.0),
63 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
64 _young_list_target_length(0),
65 _young_list_fixed_length(0),
66 _young_list_max_length(0),
67 _eden_surv_rate_group(new G1SurvRateGroup()),
68 _survivor_surv_rate_group(new G1SurvRateGroup()),
69 _reserve_factor((double) G1ReservePercent / 100.0),
70 _reserve_regions(0),
71 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
72 _free_regions_at_end_of_collection(0),
73 _rs_length(0),
74 _rs_length_prediction(0),
75 _pending_cards_at_gc_start(0),
76 _old_gen_alloc_tracker(),
77 _concurrent_start_to_mixed(),
78 _collection_set(NULL),
79 _g1h(NULL),
80 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
81 _mark_remark_start_sec(0),
82 _mark_cleanup_start_sec(0),
83 _tenuring_threshold(MaxTenuringThreshold),
84 _max_survivor_regions(0),
85 _survivors_age_table(true)
86 {
87 }
88
G1Policy::~G1Policy() {
  // Free the heap-allocated collaborators this policy owns exclusively.
  // NOTE(review): other members allocated in the constructor (_analytics,
  // _mmu_tracker, _policy_counters, the survivor rate groups, _phase_times)
  // are not freed here — presumably G1Policy lives for the lifetime of the
  // VM; confirm ownership before adding deletes.
  delete _ihop_control;
  delete _young_gen_sizer;
}
93
94 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
95 if (G1Arguments::is_heterogeneous_heap()) {
96 return new G1HeterogeneousHeapPolicy(gc_timer_stw);
97 } else {
431 _collection_set->clear_candidates();
432 _pending_cards_at_gc_start = 0;
433 }
434
// Re-establishes policy state after a Full GC: feeds the Full GC duration
// into the pause-time statistics and resets the young/mixed heuristics so
// the next cycle starts out with young-only collections.
void G1Policy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);

  collector_state()->set_in_full_gc(false);

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  collector_state()->set_in_young_only_phase(true);
  collector_state()->set_in_young_gc_before_mixed(false);
  // A Full GC may still leave occupancy above the marking threshold, so
  // re-evaluate whether a concurrent mark should be requested right away.
  collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
  collector_state()->set_in_concurrent_start_gc(false);
  collector_state()->set_mark_or_rebuild_in_progress(false);
  collector_state()->set_clearing_next_bitmap(false);

  _eden_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // Refresh sizing inputs now that compaction changed the free region count.
  _free_regions_at_end_of_collection = _g1h->num_free_regions();
  _survivor_surv_rate_group->reset();
  update_young_list_max_and_target_length();
  update_rs_length_prediction();

  _old_gen_alloc_tracker.reset_after_full_gc();

  record_pause(FullGC, _full_collection_start_sec, end_sec);
}
467
468 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
469 log_debug(gc, refine, stats)
470 ("%s refinement: %.2fms, refined: " SIZE_FORMAT
471 ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
527 assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
528 "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
529 max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
530 assert_used_and_recalculate_used_equal(_g1h);
531
532 phase_times()->record_cur_collection_start_sec(start_time_sec);
533
534 record_concurrent_refinement_stats();
535
536 _collection_set->reset_bytes_used_before();
537
538 // do that for any other surv rate groups
539 _eden_surv_rate_group->stop_adding_regions();
540 _survivors_age_table.clear();
541
542 assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
543 }
544
// Finishes the concurrent-start part of a pause: the initiation request must
// already have been consumed, so only the in-concurrent-start flag is
// cleared. The elapsed-time argument is currently unused.
void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_in_concurrent_start_gc(false);
}
549
// Stamps the start time of the Remark pause so that
// record_concurrent_mark_remark_end() can compute its duration.
void G1Policy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
}
553
554 void G1Policy::record_concurrent_mark_remark_end() {
555 double end_time_sec = os::elapsedTime();
556 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
557 _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
558 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
559
560 record_pause(Remark, _mark_remark_start_sec, end_time_sec);
561 }
562
// Stamps the start time of the Cleanup pause; the matching end-recording
// (not visible in this chunk) computes the duration from it.
void G1Policy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}
566
567 double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
615 double G1Policy::logged_cards_processing_time() const {
616 double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
617 size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
618 size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
619 phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
620 // This may happen if there are duplicate cards in different log buffers.
621 if (logged_dirty_cards > scan_heap_roots_cards) {
622 return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
623 }
624 return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
625 }
626
627 // Anything below that is considered to be zero
628 #define MIN_TIMER_GRANULARITY 0.0000001
629
630 void G1Policy::record_collection_pause_end(double pause_time_ms) {
631 G1GCPhaseTimes* p = phase_times();
632
633 double end_time_sec = os::elapsedTime();
634
635 PauseKind this_pause = young_gc_pause_kind();
636
637 bool update_stats = !_g1h->evacuation_failed();
638
639 record_pause(this_pause, end_time_sec - pause_time_ms / 1000.0, end_time_sec);
640
641 _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
642
643 if (is_concurrent_start_pause(this_pause)) {
644 record_concurrent_mark_init_end(0.0);
645 } else {
646 maybe_start_marking();
647 }
648
649 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
650 if (app_time_ms < MIN_TIMER_GRANULARITY) {
651 // This usually happens due to the timer not having the required
652 // granularity. Some Linuxes are the usual culprits.
653 // We'll just set it to something (arbitrarily) small.
654 app_time_ms = 1.0;
655 }
656
657 if (update_stats) {
658 // We maintain the invariant that all objects allocated by mutator
659 // threads will be allocated out of eden regions. So, we can use
660 // the eden region number allocated since the previous GC to
661 // calculate the application's allocate rate. The only exception
662 // to that is humongous objects that are allocated separately. But
663 // given that humongous object allocations do not really affect
664 // either the pause's duration nor when the next pause will take
665 // place we can safely ignore them here.
666 uint regions_allocated = _collection_set->eden_region_length();
667 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
668 _analytics->report_alloc_rate_ms(alloc_rate_ms);
669
670 _analytics->compute_pause_time_ratios(end_time_sec, pause_time_ms);
671 _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
672 }
673
674 if (is_last_young_pause(this_pause)) {
675 assert(!is_concurrent_start_pause(this_pause),
676 "The young GC before mixed is not allowed to be concurrent start GC");
677 // This has been the young GC before we start doing mixed GCs. We already
678 // decided to start mixed GCs much earlier, so there is nothing to do except
679 // advancing the state.
680 collector_state()->set_in_young_only_phase(false);
681 collector_state()->set_in_young_gc_before_mixed(false);
682 } else if (is_mixed_pause(this_pause)) {
683 // This is a mixed GC. Here we decide whether to continue doing more
684 // mixed GCs or not.
685 if (!next_gc_should_be_mixed("continue mixed GCs",
686 "do not continue mixed GCs")) {
687 collector_state()->set_in_young_only_phase(true);
688
689 clear_collection_set_candidates();
690 maybe_start_marking();
691 }
692 } else {
693 assert(is_young_only_pause(this_pause), "must be");
694 }
695
696 _eden_surv_rate_group->start_adding_regions();
697
698 double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
699 if (update_stats) {
700 size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
701 p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
702 // Update prediction for card merge; MergeRSDirtyCards includes the cards from the Eager Reclaim phase.
703 size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
704 p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
705 total_log_buffer_cards;
706
707 // The threshold for the number of cards in a given sampling which we consider
708 // large enough so that the impact from setup and other costs is negligible.
709 size_t const CardsNumSamplingThreshold = 10;
710
711 if (total_cards_merged > CardsNumSamplingThreshold) {
712 double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
713 average_time_ms(G1GCPhaseTimes::MergeRS) +
714 average_time_ms(G1GCPhaseTimes::MergeHCC) +
715 average_time_ms(G1GCPhaseTimes::MergeLB) +
716 average_time_ms(G1GCPhaseTimes::OptMergeRS);
717 _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged,
718 is_young_only_pause(this_pause));
719 }
720
721 // Update prediction for card scan
722 size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
723 p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
724
725 if (total_cards_scanned > CardsNumSamplingThreshold) {
726 double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) +
727 average_time_ms(G1GCPhaseTimes::OptScanHR);
728
729 _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned,
730 is_young_only_pause(this_pause));
731 }
732
733 // Update prediction for the ratio between cards from the remembered
734 // sets and actually scanned cards from the remembered sets.
735 // Cards from the remembered sets are all cards not duplicated by cards from
736 // the logs.
737 // Due to duplicates in the log buffers, the number of actually scanned cards
738 // can be smaller than the cards in the log buffers.
739 const size_t from_rs_length_cards = (total_cards_scanned > total_log_buffer_cards) ? total_cards_scanned - total_log_buffer_cards : 0;
740 double merge_to_scan_ratio = 0.0;
741 if (total_cards_scanned > 0) {
742 merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned;
743 }
744 _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio,
745 is_young_only_pause(this_pause));
746
747 const size_t recorded_rs_length = _collection_set->recorded_rs_length();
748 const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0;
749 _analytics->report_rs_length_diff(rs_length_diff);
750
751 // Update prediction for copy cost per byte
752 size_t copied_bytes = p->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSCopiedBytes);
753
754 if (copied_bytes > 0) {
755 double cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / copied_bytes;
756 _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
757 }
758
759 if (_collection_set->young_region_length() > 0) {
760 _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
761 _collection_set->young_region_length());
762 }
763
764 if (_collection_set->old_region_length() > 0) {
765 _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
766 _collection_set->old_region_length());
767 }
768
769 _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
770
771 // Do not update RS lengths and the number of pending cards with information from mixed gc:
772 // these are is wildly different to during young only gc and mess up young gen sizing right
773 // after the mixed gc phase.
774 // During mixed gc we do not use them for young gen sizing.
775 if (is_young_only_pause(this_pause)) {
776 _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
777 _analytics->report_rs_length((double) _rs_length);
778 }
779 }
780
781 assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
782 "If the last pause has been concurrent start, we should not have been in the marking window");
783 if (is_concurrent_start_pause(this_pause)) {
784 collector_state()->set_mark_or_rebuild_in_progress(true);
785 }
786
787 _free_regions_at_end_of_collection = _g1h->num_free_regions();
788
789 update_rs_length_prediction();
790
791 // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
792 // that in this case we are not running in a "normal" operating mode.
793 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
794 // IHOP control wants to know the expected young gen length if it were not
795 // restrained by the heap reserve. Using the actual length would make the
796 // prediction too small and the limit the young gen every time we get to the
797 // predicted target occupancy.
798 size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
799
800 _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
801 update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
802 _old_gen_alloc_tracker.last_cycle_old_bytes(),
803 last_unrestrained_young_length * HeapRegion::GrainBytes,
804 is_young_only_pause(this_pause));
805
806 _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
807 } else {
808 // Any garbage collection triggered as periodic collection resets the time-to-mixed
809 // measurement. Periodic collection typically means that the application is "inactive", i.e.
810 // the marking threads may have received an uncharacterisic amount of cpu time
811 // for completing the marking, i.e. are faster than expected.
812 // This skews the predicted marking length towards smaller values which might cause
813 // the mark start being too late.
814 _concurrent_start_to_mixed.reset();
815 }
816
817 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
818 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
819
820 if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
821 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
822 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
823 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
824
825 scan_logged_cards_time_goal_ms = 0;
826 } else {
827 scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
828 }
829
830 double const logged_cards_time = logged_cards_processing_time();
831
832 log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
833 scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
834
845 G1HeapWastePercent);
846 } else {
847 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
848 }
849 }
850
851 void G1Policy::update_ihop_prediction(double mutator_time_s,
852 size_t mutator_alloc_bytes,
853 size_t young_gen_size,
854 bool this_gc_was_young_only) {
855 // Always try to update IHOP prediction. Even evacuation failures give information
856 // about e.g. whether to start IHOP earlier next time.
857
858 // Avoid using really small application times that might create samples with
859 // very high or very low values. They may be caused by e.g. back-to-back gcs.
860 double const min_valid_time = 1e-6;
861
862 bool report = false;
863
864 double marking_to_mixed_time = -1.0;
865 if (!this_gc_was_young_only && _concurrent_start_to_mixed.has_result()) {
866 marking_to_mixed_time = _concurrent_start_to_mixed.last_marking_time();
867 assert(marking_to_mixed_time > 0.0,
868 "Concurrent start to mixed time must be larger than zero but is %.3f",
869 marking_to_mixed_time);
870 if (marking_to_mixed_time > min_valid_time) {
871 _ihop_control->update_marking_length(marking_to_mixed_time);
872 report = true;
873 }
874 }
875
876 // As an approximation for the young gc promotion rates during marking we use
877 // all of them. In many applications there are only a few if any young gcs during
878 // marking, which makes any prediction useless. This increases the accuracy of the
879 // prediction.
880 if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
881 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
882 report = true;
883 }
884
885 if (report) {
886 report_ihop_statistics();
887 }
888 }
1004 double max_survivor_regions_d =
1005 (double) _young_list_target_length / (double) SurvivorRatio;
1006
1007 // Calculate desired survivor size based on desired max survivor regions (unconstrained
1008 // by remaining heap). Otherwise we may cause undesired promotions as we are
1009 // already getting close to end of the heap, impacting performance even more.
1010 uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
1011 size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);
1012
1013 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
1014 if (UsePerfData) {
1015 _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
1016 _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
1017 }
1018 // The real maximum survivor size is bounded by the number of regions that can
1019 // be allocated into.
1020 _max_survivor_regions = MIN2(desired_max_survivor_regions,
1021 _g1h->num_free_or_available_regions());
1022 }
1023
1024 bool G1Policy::force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause) {
1025 // We actually check whether we are marking here and not if we are in a
1026 // reclamation phase. This means that we will schedule a concurrent mark
1027 // even while we are still in the process of reclaiming memory.
1028 bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
1029 if (!during_cycle) {
1030 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). "
1031 "GC cause: %s",
1032 GCCause::to_string(gc_cause));
1033 collector_state()->set_initiate_conc_mark_if_possible(true);
1034 return true;
1035 } else {
1036 log_debug(gc, ergo)("Do not request concurrent cycle initiation "
1037 "(concurrent cycle already in progress). GC cause: %s",
1038 GCCause::to_string(gc_cause));
1039 return false;
1040 }
1041 }
1042
// Transition the collector state into a concurrent start pause: the upcoming
// young collection will also start concurrent marking, so the pending
// initiation request is consumed (cleared) here.
void G1Policy::initiate_conc_mark() {
  collector_state()->set_in_concurrent_start_gc(true);
  collector_state()->set_initiate_conc_mark_if_possible(false);
}
1047
1048 void G1Policy::decide_on_conc_mark_initiation() {
1049 // We are about to decide on whether this pause will be a
1050 // concurrent start pause.
1051
1052 // First, collector_state()->in_concurrent_start_gc() should not be already set. We
1053 // will set it here if we have to. However, it should be cleared by
1054 // the end of the pause (it's only set for the duration of a
1055 // concurrent start pause).
1056 assert(!collector_state()->in_concurrent_start_gc(), "pre-condition");
1057
1058 if (collector_state()->initiate_conc_mark_if_possible()) {
1059 // We had noticed on a previous pause that the heap occupancy has
1060 // gone over the initiating threshold and we should start a
1061 // concurrent marking cycle. Or we've been explicitly requested
1062 // to start a concurrent marking cycle. Either way, we initiate
1063 // one if not inhibited for some reason.
1064
1065 GCCause::Cause cause = _g1h->gc_cause();
1066 if ((cause != GCCause::_wb_breakpoint) &&
1067 ConcurrentGCBreakpoints::is_controlled()) {
1068 log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)");
1069 } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
1070 // Initiate a new concurrent start if there is no marking or reclamation going on.
1071 initiate_conc_mark();
1072 log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
1073 } else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
1074 (cause == GCCause::_wb_breakpoint)) {
1075 // Initiate a user requested concurrent start or run to a breakpoint.
1076 // A concurrent start must be young only GC, so the collector state
1077 // must be updated to reflect this.
1078 collector_state()->set_in_young_only_phase(true);
1079 collector_state()->set_in_young_gc_before_mixed(false);
1080
1081 // We might have ended up coming here about to start a mixed phase with a collection set
1082 // active. The following remark might change the change the "evacuation efficiency" of
1083 // the regions in this set, leading to failing asserts later.
1084 // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
1085 clear_collection_set_candidates();
1086 abort_time_to_mixed_tracking();
1087 initiate_conc_mark();
1088 log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)",
1089 (cause == GCCause::_wb_breakpoint) ? "run_to breakpoint" : "user");
1090 } else {
1091 // The concurrent marking thread is still finishing up the
1092 // previous cycle. If we start one right now the two cycles
1093 // overlap. In particular, the concurrent marking thread might
1094 // be in the process of clearing the next marking bitmap (which
1095 // we will use for the next cycle if we start one). Starting a
1096 // cycle now will be bad given that parts of the marking
1136 }
1137 };
1138
1139 void G1Policy::clear_collection_set_candidates() {
1140 // Clear remembered sets of remaining candidate regions and the actual candidate
1141 // set.
1142 G1ClearCollectionSetCandidateRemSets cl;
1143 _collection_set->candidates()->iterate(&cl);
1144 _collection_set->clear_candidates();
1145 }
1146
1147 void G1Policy::maybe_start_marking() {
1148 if (need_to_start_conc_mark("end of GC")) {
1149 // Note: this might have already been set, if during the last
1150 // pause we decided to start a cycle but at the beginning of
1151 // this pause we decided to postpone it. That's OK.
1152 collector_state()->set_initiate_conc_mark_if_possible(true);
1153 }
1154 }
1155
1156 bool G1Policy::is_young_only_pause(PauseKind kind) {
1157 assert(kind != FullGC, "must be");
1158 assert(kind != Remark, "must be");
1159 assert(kind != Cleanup, "must be");
1160 return kind == ConcurrentStartGC || kind == LastYoungGC || kind == YoungOnlyGC;
1161 }
1162
1163 bool G1Policy::is_mixed_pause(PauseKind kind) {
1164 assert(kind != FullGC, "must be");
1165 assert(kind != Remark, "must be");
1166 assert(kind != Cleanup, "must be");
1167 return kind == MixedGC;
1168 }
1169
// Returns whether the given pause was the last young-only gc before entering
// the mixed gc phase.
bool G1Policy::is_last_young_pause(PauseKind kind) {
  return kind == LastYoungGC;
}
1173
// Returns whether the given pause was a young gc that also initiated a
// concurrent marking cycle.
bool G1Policy::is_concurrent_start_pause(PauseKind kind) {
  return kind == ConcurrentStartGC;
}
1177
// Classifies the young collection pause currently in progress from the
// collector state. Full gcs must not reach here. The asserts document that
// the state flags checked are mutually exclusive.
G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
  assert(!collector_state()->in_full_gc(), "must be");
  if (collector_state()->in_concurrent_start_gc()) {
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return ConcurrentStartGC;
  } else if (collector_state()->in_young_gc_before_mixed()) {
    assert(!collector_state()->in_concurrent_start_gc(), "must be");
    return LastYoungGC;
  } else if (collector_state()->in_mixed_phase()) {
    assert(!collector_state()->in_concurrent_start_gc(), "must be");
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return MixedGC;
  } else {
    // Neither concurrent start, last-young nor mixed: a plain young-only gc.
    assert(!collector_state()->in_concurrent_start_gc(), "must be");
    assert(!collector_state()->in_young_gc_before_mixed(), "must be");
    return YoungOnlyGC;
  }
}
1196
1197 void G1Policy::record_pause(PauseKind kind, double start, double end) {
1198 // Manage the MMU tracker. For some reason it ignores Full GCs.
1199 if (kind != FullGC) {
1200 _mmu_tracker->add_pause(start, end);
1201 }
1202 // Manage the mutator time tracking from concurrent start to first mixed gc.
1203 switch (kind) {
1204 case FullGC:
1205 abort_time_to_mixed_tracking();
1206 break;
1207 case Cleanup:
1208 case Remark:
1209 case YoungOnlyGC:
1210 case LastYoungGC:
1211 _concurrent_start_to_mixed.add_pause(end - start);
1212 break;
1213 case ConcurrentStartGC:
1214 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1215 _concurrent_start_to_mixed.record_concurrent_start_end(end);
1216 }
1217 break;
1218 case MixedGC:
1219 _concurrent_start_to_mixed.record_mixed_gc_start(start);
1220 break;
1221 default:
1222 ShouldNotReachHere();
1223 }
1224 }
1225
// Discards the current concurrent-start-to-mixed-gc time measurement, e.g.
// after a full gc or when a newly requested concurrent cycle supersedes the
// one being tracked.
void G1Policy::abort_time_to_mixed_tracking() {
  _concurrent_start_to_mixed.reset();
}
1229
1230 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
1231 const char* false_action_str) const {
1232 G1CollectionSetCandidates* candidates = _collection_set->candidates();
1233
1234 if (candidates->is_empty()) {
1235 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1236 return false;
1237 }
1238
1239 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1240 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1241 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1242 double threshold = (double) G1HeapWastePercent;
1243 if (reclaimable_percent <= threshold) {
1244 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1245 false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1246 return false;
1247 }
|