57 _analytics(new G1Analytics(&_predictor)),
58 _remset_tracker(),
59 _mmu_tracker(new G1MMUTrackerQueue(GCPauseIntervalMillis / 1000.0, MaxGCPauseMillis / 1000.0)),
60 _ihop_control(create_ihop_control(&_predictor)),
61 _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
62 _full_collection_start_sec(0.0),
63 _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
64 _young_list_target_length(0),
65 _young_list_fixed_length(0),
66 _young_list_max_length(0),
67 _eden_surv_rate_group(new G1SurvRateGroup()),
68 _survivor_surv_rate_group(new G1SurvRateGroup()),
69 _reserve_factor((double) G1ReservePercent / 100.0),
70 _reserve_regions(0),
71 _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
72 _free_regions_at_end_of_collection(0),
73 _rs_length(0),
74 _rs_length_prediction(0),
75 _pending_cards_at_gc_start(0),
76 _old_gen_alloc_tracker(),
77 _concurrent_start_to_mixed(),
78 _collection_set(NULL),
79 _g1h(NULL),
80 _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
81 _mark_remark_start_sec(0),
82 _mark_cleanup_start_sec(0),
83 _tenuring_threshold(MaxTenuringThreshold),
84 _max_survivor_regions(0),
85 _survivors_age_table(true)
86 {
87 }
88
89 G1Policy::~G1Policy() {
90 delete _ihop_control;
91 delete _young_gen_sizer;
92 }
93
94 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
95 if (G1Arguments::is_heterogeneous_heap()) {
96 return new G1HeterogeneousHeapPolicy(gc_timer_stw);
97 } else {
431 _collection_set->clear_candidates();
432 _pending_cards_at_gc_start = 0;
433 }
434
435 void G1Policy::record_full_collection_end() {
436 // Consider this like a collection pause for the purposes of allocation
437 // since last pause.
438 double end_sec = os::elapsedTime();
439 double full_gc_time_sec = end_sec - _full_collection_start_sec;
440 double full_gc_time_ms = full_gc_time_sec * 1000.0;
441
442 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
443
444 collector_state()->set_in_full_gc(false);
445
446 // "Nuke" the heuristics that control the young/mixed GC
447 // transitions and make sure we start with young GCs after the Full GC.
448 collector_state()->set_in_young_only_phase(true);
449 collector_state()->set_in_young_gc_before_mixed(false);
450 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
451 collector_state()->set_in_concurrent_start_gc(false);
452 collector_state()->set_mark_or_rebuild_in_progress(false);
453 collector_state()->set_clearing_next_bitmap(false);
454
455 _eden_surv_rate_group->start_adding_regions();
456 // also call this on any additional surv rate groups
457
458 _free_regions_at_end_of_collection = _g1h->num_free_regions();
459 _survivor_surv_rate_group->reset();
460 update_young_list_max_and_target_length();
461 update_rs_length_prediction();
462
463 _old_gen_alloc_tracker.reset_after_full_gc();
464
465 record_pause(FullGC, _full_collection_start_sec, end_sec);
466 }
467
468 static void log_refinement_stats(const char* kind, const G1ConcurrentRefineStats& stats) {
469 log_debug(gc, refine, stats)
470 ("%s refinement: %.2fms, refined: " SIZE_FORMAT
471 ", precleaned: " SIZE_FORMAT ", dirtied: " SIZE_FORMAT,
527 assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
528 "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
529 max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
530 assert_used_and_recalculate_used_equal(_g1h);
531
532 phase_times()->record_cur_collection_start_sec(start_time_sec);
533
534 record_concurrent_refinement_stats();
535
536 _collection_set->reset_bytes_used_before();
537
538 // do that for any other surv rate groups
539 _eden_surv_rate_group->stop_adding_regions();
540 _survivors_age_table.clear();
541
542 assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
543 }
544
545 void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
546 assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
547 collector_state()->set_in_concurrent_start_gc(false);
548 }
549
550 void G1Policy::record_concurrent_mark_remark_start() {
551 _mark_remark_start_sec = os::elapsedTime();
552 }
553
554 void G1Policy::record_concurrent_mark_remark_end() {
555 double end_time_sec = os::elapsedTime();
556 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
557 _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
558 _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
559
560 record_pause(Remark, _mark_remark_start_sec, end_time_sec);
561 }
562
563 void G1Policy::record_concurrent_mark_cleanup_start() {
564 _mark_cleanup_start_sec = os::elapsedTime();
565 }
566
567 double G1Policy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
615 double G1Policy::logged_cards_processing_time() const {
616 double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
617 size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
618 size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
619 phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
620 // This may happen if there are duplicate cards in different log buffers.
621 if (logged_dirty_cards > scan_heap_roots_cards) {
622 return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
623 }
624 return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
625 }
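// Illustrative arithmetic for the proportional attribution above (assumed
// numbers, not from any run): if ScanHR + OptScanHR average 8.0ms over
// 40000 scanned cards, 10000 of them logged dirty cards, the logged cards
// are charged 8.0 * 10000 / 40000 = 2.0ms plus the MergeLB average time.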
626
627 // Anything below that is considered to be zero
628 #define MIN_TIMER_GRANULARITY 0.0000001
629
630 void G1Policy::record_collection_pause_end(double pause_time_ms) {
631 G1GCPhaseTimes* p = phase_times();
632
633 double end_time_sec = os::elapsedTime();
634
635 PauseKind this_pause = young_gc_pause_kind();
636
637 bool update_stats = !_g1h->evacuation_failed();
638
639 record_pause(this_pause, end_time_sec - pause_time_ms / 1000.0, end_time_sec);
640
641 _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
642
643 if (is_concurrent_start_pause(this_pause)) {
644 record_concurrent_mark_init_end(0.0);
645 } else {
646 maybe_start_marking();
647 }
648
649 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
650 if (app_time_ms < MIN_TIMER_GRANULARITY) {
651 // This usually happens due to the timer not having the required
652 // granularity. Some Linuxes are the usual culprits.
653 // We'll just set it to something (arbitrarily) small.
654 app_time_ms = 1.0;
655 }
656
657 if (update_stats) {
658 // We maintain the invariant that all objects allocated by mutator
659 // threads will be allocated out of eden regions. So, we can use
660 // the eden region number allocated since the previous GC to
661 // calculate the application's allocate rate. The only exception
662 // to that is humongous objects that are allocated separately. But
663 // given that humongous object allocations do not really affect
664 // either the pause's duration or when the next pause will take
665 // place we can safely ignore them here.
666 uint regions_allocated = _collection_set->eden_region_length();
667 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
668 _analytics->report_alloc_rate_ms(alloc_rate_ms);
669
670 _analytics->compute_pause_time_ratios(end_time_sec, pause_time_ms);
671 _analytics->update_recent_gc_times(end_time_sec, pause_time_ms);
672 }
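// Hedged example of the allocation rate above (assumed numbers): 50 eden
// regions allocated during 2000ms of mutator time gives an allocation rate
// of 50 / 2000 = 0.025 regions/ms for the predictor.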
673
674 if (is_last_young_pause(this_pause)) {
675 assert(!is_concurrent_start_pause(this_pause),
676 "The young GC before mixed is not allowed to be concurrent start GC");
677 // This has been the young GC before we start doing mixed GCs. We already
678 // decided to start mixed GCs much earlier, so there is nothing to do except
679 // advancing the state.
680 collector_state()->set_in_young_only_phase(false);
681 collector_state()->set_in_young_gc_before_mixed(false);
682 } else if (!is_young_only_pause(this_pause)) {
683 // This is a mixed GC. Here we decide whether to continue doing more
684 // mixed GCs or not.
685 if (!next_gc_should_be_mixed("continue mixed GCs",
686 "do not continue mixed GCs")) {
687 collector_state()->set_in_young_only_phase(true);
688
689 clear_collection_set_candidates();
690 maybe_start_marking();
691 }
692 }
693
694 _eden_surv_rate_group->start_adding_regions();
695
696 double merge_hcc_time_ms = average_time_ms(G1GCPhaseTimes::MergeHCC);
697 if (update_stats) {
698 size_t const total_log_buffer_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeHCC, G1GCPhaseTimes::MergeHCCDirtyCards) +
699 p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
700 // Update prediction for card merge; MergeRSDirtyCards includes the cards from the Eager Reclaim phase.
701 size_t const total_cards_merged = p->sum_thread_work_items(G1GCPhaseTimes::MergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
702 p->sum_thread_work_items(G1GCPhaseTimes::OptMergeRS, G1GCPhaseTimes::MergeRSDirtyCards) +
703 total_log_buffer_cards;
704
705 // The threshold for the number of cards in a given sampling which we consider
706 // large enough so that the impact from setup and other costs is negligible.
707 size_t const CardsNumSamplingThreshold = 10;
708
709 if (total_cards_merged > CardsNumSamplingThreshold) {
710 double avg_time_merge_cards = average_time_ms(G1GCPhaseTimes::MergeER) +
711 average_time_ms(G1GCPhaseTimes::MergeRS) +
712 average_time_ms(G1GCPhaseTimes::MergeHCC) +
713 average_time_ms(G1GCPhaseTimes::MergeLB) +
714 average_time_ms(G1GCPhaseTimes::OptMergeRS);
715 _analytics->report_cost_per_card_merge_ms(avg_time_merge_cards / total_cards_merged,
716 is_young_only_pause(this_pause));
717 }
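// E.g. (hypothetical sample): 5.0ms of total merge time over 50000 merged
// cards reports a cost of 5.0 / 50000 = 1e-4 ms per merged card.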
718
719 // Update prediction for card scan
720 size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
721 p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
722
723 if (total_cards_scanned > CardsNumSamplingThreshold) {
724 double avg_time_dirty_card_scan = average_time_ms(G1GCPhaseTimes::ScanHR) +
725 average_time_ms(G1GCPhaseTimes::OptScanHR);
726
727 _analytics->report_cost_per_card_scan_ms(avg_time_dirty_card_scan / total_cards_scanned,
728 is_young_only_pause(this_pause));
729 }
730
731 // Update prediction for the ratio between cards from the remembered
732 // sets and actually scanned cards from the remembered sets.
733 // Cards from the remembered sets are all cards not duplicated by cards from
734 // the logs.
735 // Due to duplicates in the log buffers, the number of actually scanned cards
736 // can be smaller than the cards in the log buffers.
737 const size_t from_rs_length_cards = (total_cards_scanned > total_log_buffer_cards) ? total_cards_scanned - total_log_buffer_cards : 0;
738 double merge_to_scan_ratio = 0.0;
739 if (total_cards_scanned > 0) {
740 merge_to_scan_ratio = (double) from_rs_length_cards / total_cards_scanned;
741 }
742 _analytics->report_card_merge_to_scan_ratio(merge_to_scan_ratio,
743 is_young_only_pause(this_pause));
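// Worked example (assumed counts): 30000 scanned cards of which 12000 came
// from log buffers gives from_rs_length_cards = 18000 and a merge-to-scan
// ratio of 18000 / 30000 = 0.6.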
744
745 const size_t recorded_rs_length = _collection_set->recorded_rs_length();
746 const size_t rs_length_diff = _rs_length > recorded_rs_length ? _rs_length - recorded_rs_length : 0;
747 _analytics->report_rs_length_diff(rs_length_diff);
748
749 // Update prediction for copy cost per byte
750 size_t copied_bytes = p->sum_thread_work_items(G1GCPhaseTimes::MergePSS, G1GCPhaseTimes::MergePSSCopiedBytes);
751
752 if (copied_bytes > 0) {
753 double cost_per_byte_ms = (average_time_ms(G1GCPhaseTimes::ObjCopy) + average_time_ms(G1GCPhaseTimes::OptObjCopy)) / copied_bytes;
754 _analytics->report_cost_per_byte_ms(cost_per_byte_ms, collector_state()->mark_or_rebuild_in_progress());
755 }
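// For instance (assumed values): 120ms of ObjCopy + OptObjCopy time for
// 64MB (67108864 bytes) copied is roughly 1.8e-6 ms per copied byte.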
756
757 if (_collection_set->young_region_length() > 0) {
758 _analytics->report_young_other_cost_per_region_ms(young_other_time_ms() /
759 _collection_set->young_region_length());
760 }
761
762 if (_collection_set->old_region_length() > 0) {
763 _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
764 _collection_set->old_region_length());
765 }
766
767 _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
768
769 // Do not update RS lengths and the number of pending cards with information from mixed gc:
770 // these are wildly different from those during young only gc and mess up young gen
771 // sizing right after the mixed gc phase.
772 // During mixed gc we do not use them for young gen sizing.
773 if (is_young_only_pause(this_pause)) {
774 _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
775 _analytics->report_rs_length((double) _rs_length);
776 }
777 }
778
779 assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
780 "If the last pause has been concurrent start, we should not have been in the marking window");
781 if (is_concurrent_start_pause(this_pause)) {
782 collector_state()->set_mark_or_rebuild_in_progress(true);
783 }
784
785 _free_regions_at_end_of_collection = _g1h->num_free_regions();
786
787 update_rs_length_prediction();
788
789 // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
790 // that in this case we are not running in a "normal" operating mode.
791 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
792 // IHOP control wants to know the expected young gen length if it were not
793 // restrained by the heap reserve. Using the actual length would make the
794 // prediction too small and in turn limit the young gen every time we get to the
795 // predicted target occupancy.
796 size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
797
798 _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
799 update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
800 _old_gen_alloc_tracker.last_cycle_old_bytes(),
801 last_unrestrained_young_length * HeapRegion::GrainBytes,
802 is_young_only_pause(this_pause));
803
804 _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
805 } else {
806 // Any garbage collection triggered as periodic collection resets the time-to-mixed
807 // measurement. Periodic collection typically means that the application is "inactive", i.e.
808 // the marking threads may have received an uncharacteristic amount of cpu time
809 // for completing the marking, i.e. are faster than expected.
810 // This skews the predicted marking length towards smaller values which might
811 // cause the mark to start too late.
812 _concurrent_start_to_mixed.reset();
813 }
814
815 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
816 double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
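// Sketch with default flags (assuming MaxGCPauseMillis = 200 and
// G1RSetUpdatingPauseTimePercent = 10): 0.2s * 1000 * 10 / 100 = 20ms
// budgeted for scanning logged cards before the HCC adjustment below.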
817
818 if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
819 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
820 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
821 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
822
823 scan_logged_cards_time_goal_ms = 0;
824 } else {
825 scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
826 }
827
828 double const logged_cards_time = logged_cards_processing_time();
829
830 log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
831 scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
832
843 G1HeapWastePercent);
844 } else {
845 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
846 }
847 }
848
849 void G1Policy::update_ihop_prediction(double mutator_time_s,
850 size_t mutator_alloc_bytes,
851 size_t young_gen_size,
852 bool this_gc_was_young_only) {
853 // Always try to update IHOP prediction. Even evacuation failures give information
854 // about e.g. whether to start IHOP earlier next time.
855
856 // Avoid using really small application times that might create samples with
857 // very high or very low values. They may be caused by e.g. back-to-back gcs.
858 double const min_valid_time = 1e-6;
859
860 bool report = false;
861
862 double marking_to_mixed_time = -1.0;
863 if (!this_gc_was_young_only && _concurrent_start_to_mixed.has_result()) {
864 marking_to_mixed_time = _concurrent_start_to_mixed.last_marking_time();
865 assert(marking_to_mixed_time > 0.0,
866 "Concurrent start to mixed time must be larger than zero but is %.3f",
867 marking_to_mixed_time);
868 if (marking_to_mixed_time > min_valid_time) {
869 _ihop_control->update_marking_length(marking_to_mixed_time);
870 report = true;
871 }
872 }
873
874 // As an approximation for the young gc promotion rates during marking we use
875 // all young gcs. In many applications there are only a few if any young gcs during
876 // marking, which would make a prediction based on them alone useless; using all
877 // of them increases the accuracy of the prediction.
878 if (this_gc_was_young_only && mutator_time_s > min_valid_time) {
879 _ihop_control->update_allocation_info(mutator_time_s, mutator_alloc_bytes, young_gen_size);
880 report = true;
881 }
882
883 if (report) {
884 report_ihop_statistics();
885 }
886 }
1002 double max_survivor_regions_d =
1003 (double) _young_list_target_length / (double) SurvivorRatio;
1004
1005 // Calculate desired survivor size based on desired max survivor regions (unconstrained
1006 // by remaining heap). Otherwise we may cause undesired promotions as we are
1007 // already getting close to the end of the heap, impacting performance even more.
1008 uint const desired_max_survivor_regions = ceil(max_survivor_regions_d);
1009 size_t const survivor_size = desired_survivor_size(desired_max_survivor_regions);
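// Example under assumed settings: a young list target of 60 regions with
// the default SurvivorRatio of 8 gives 60 / 8 = 7.5, so
// desired_max_survivor_regions = ceil(7.5) = 8.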
1010
1011 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(survivor_size);
1012 if (UsePerfData) {
1013 _policy_counters->tenuring_threshold()->set_value(_tenuring_threshold);
1014 _policy_counters->desired_survivor_size()->set_value(survivor_size * oopSize);
1015 }
1016 // The real maximum survivor size is bounded by the number of regions that can
1017 // be allocated into.
1018 _max_survivor_regions = MIN2(desired_max_survivor_regions,
1019 _g1h->num_free_or_available_regions());
1020 }
1021
1022 bool G1Policy::force_concurrent_start_if_outside_cycle(GCCause::Cause gc_cause) {
1023 // We actually check whether we are marking here and not if we are in a
1024 // reclamation phase. This means that we will schedule a concurrent mark
1025 // even while we are still in the process of reclaiming memory.
1026 bool during_cycle = _g1h->concurrent_mark()->cm_thread()->during_cycle();
1027 if (!during_cycle) {
1028 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). "
1029 "GC cause: %s",
1030 GCCause::to_string(gc_cause));
1031 collector_state()->set_initiate_conc_mark_if_possible(true);
1032 return true;
1033 } else {
1034 log_debug(gc, ergo)("Do not request concurrent cycle initiation "
1035 "(concurrent cycle already in progress). GC cause: %s",
1036 GCCause::to_string(gc_cause));
1037 return false;
1038 }
1039 }
1040
1041 void G1Policy::initiate_conc_mark() {
1042 collector_state()->set_in_concurrent_start_gc(true);
1043 collector_state()->set_initiate_conc_mark_if_possible(false);
1044 }
1045
1046 void G1Policy::decide_on_conc_mark_initiation() {
1047 // We are about to decide on whether this pause will be a
1048 // concurrent start pause.
1049
1050 // First, collector_state()->in_concurrent_start_gc() should not be already set. We
1051 // will set it here if we have to. However, it should be cleared by
1052 // the end of the pause (it's only set for the duration of a
1053 // concurrent start pause).
1054 assert(!collector_state()->in_concurrent_start_gc(), "pre-condition");
1055
1056 if (collector_state()->initiate_conc_mark_if_possible()) {
1057 // We had noticed on a previous pause that the heap occupancy has
1058 // gone over the initiating threshold and we should start a
1059 // concurrent marking cycle. Or we've been explicitly requested
1060 // to start a concurrent marking cycle. Either way, we initiate
1061 // one if not inhibited for some reason.
1062
1063 GCCause::Cause cause = _g1h->gc_cause();
1064 if ((cause != GCCause::_wb_breakpoint) &&
1065 ConcurrentGCBreakpoints::is_controlled()) {
1066 log_debug(gc, ergo)("Do not initiate concurrent cycle (whitebox controlled)");
1067 } else if (!about_to_start_mixed_phase() && collector_state()->in_young_only_phase()) {
1068 // Initiate a new concurrent start if there is no marking or reclamation going on.
1069 initiate_conc_mark();
1070 log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
1071 } else if (_g1h->is_user_requested_concurrent_full_gc(cause) ||
1072 (cause == GCCause::_wb_breakpoint)) {
1073 // Initiate a user requested concurrent start or run to a breakpoint.
1074 // A concurrent start must be young only GC, so the collector state
1075 // must be updated to reflect this.
1076 collector_state()->set_in_young_only_phase(true);
1077 collector_state()->set_in_young_gc_before_mixed(false);
1078
1079 // We might have ended up coming here about to start a mixed phase with a collection set
1080 // active. The following remark might change the "evacuation efficiency" of
1081 // the regions in this set, leading to failing asserts later.
1082 // Since the concurrent cycle will recreate the collection set anyway, simply drop it here.
1083 clear_collection_set_candidates();
1084 abort_time_to_mixed_tracking();
1085 initiate_conc_mark();
1086 log_debug(gc, ergo)("Initiate concurrent cycle (%s requested concurrent cycle)",
1087 (cause == GCCause::_wb_breakpoint) ? "run_to breakpoint" : "user");
1088 } else {
1089 // The concurrent marking thread is still finishing up the
1090 // previous cycle. If we start one right now the two cycles
1091 // overlap. In particular, the concurrent marking thread might
1092 // be in the process of clearing the next marking bitmap (which
1093 // we will use for the next cycle if we start one). Starting a
1094 // cycle now will be bad given that parts of the marking
1134 }
1135 };
1136
1137 void G1Policy::clear_collection_set_candidates() {
1138 // Clear remembered sets of remaining candidate regions and the actual candidate
1139 // set.
1140 G1ClearCollectionSetCandidateRemSets cl;
1141 _collection_set->candidates()->iterate(&cl);
1142 _collection_set->clear_candidates();
1143 }
1144
1145 void G1Policy::maybe_start_marking() {
1146 if (need_to_start_conc_mark("end of GC")) {
1147 // Note: this might have already been set, if during the last
1148 // pause we decided to start a cycle but at the beginning of
1149 // this pause we decided to postpone it. That's OK.
1150 collector_state()->set_initiate_conc_mark_if_possible(true);
1151 }
1152 }
1153
1154 bool G1Policy::is_young_only_pause(PauseKind kind) {
1155 assert(kind != FullGC, "must be");
1156 assert(kind != Remark, "must be");
1157 assert(kind != Cleanup, "must be");
1158 return kind == ConcurrentStartGC || kind == LastYoungGC || kind == YoungOnlyGC;
1159 }
1160
1161 bool G1Policy::is_last_young_pause(PauseKind kind) {
1162 return kind == LastYoungGC;
1163 }
1164
1165 bool G1Policy::is_concurrent_start_pause(PauseKind kind) {
1166 return kind == ConcurrentStartGC;
1167 }
1168
1169 G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
1170 assert(!collector_state()->in_full_gc(), "must be");
1171 if (collector_state()->in_concurrent_start_gc()) {
1172 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1173 return ConcurrentStartGC;
1174 } else if (collector_state()->in_young_gc_before_mixed()) {
1175 assert(!collector_state()->in_concurrent_start_gc(), "must be");
1176 return LastYoungGC;
1177 } else if (collector_state()->in_mixed_phase()) {
1178 assert(!collector_state()->in_concurrent_start_gc(), "must be");
1179 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1180 return MixedGC;
1181 } else {
1182 assert(!collector_state()->in_concurrent_start_gc(), "must be");
1183 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1184 return YoungOnlyGC;
1185 }
1186 }
1187
1188 void G1Policy::record_pause(PauseKind kind, double start, double end) {
1189 // Manage the MMU tracker. For some reason it ignores Full GCs.
1190 if (kind != FullGC) {
1191 _mmu_tracker->add_pause(start, end);
1192 }
1193 // Manage the mutator time tracking from concurrent start to first mixed gc.
1194 switch (kind) {
1195 case FullGC:
1196 abort_time_to_mixed_tracking();
1197 break;
1198 case Cleanup:
1199 case Remark:
1200 case YoungOnlyGC:
1201 case LastYoungGC:
1202 _concurrent_start_to_mixed.add_pause(end - start);
1203 break;
1204 case ConcurrentStartGC:
1205 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1206 _concurrent_start_to_mixed.record_concurrent_start_end(end);
1207 }
1208 break;
1209 case MixedGC:
1210 _concurrent_start_to_mixed.record_mixed_gc_start(start);
1211 break;
1212 default:
1213 ShouldNotReachHere();
1214 }
1215 }
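// Illustrative timeline (assumed numbers): a concurrent start pause ends at
// t=10.0s, two young pauses of 0.05s each are added, and the first mixed gc
// starts at t=14.0s; the tracked concurrent-start-to-mixed mutator time
// would then be 14.0 - 10.0 - 0.1 = 3.9s.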
1216
1217 void G1Policy::abort_time_to_mixed_tracking() {
1218 _concurrent_start_to_mixed.reset();
1219 }
1220
1221 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
1222 const char* false_action_str) const {
1223 G1CollectionSetCandidates* candidates = _collection_set->candidates();
1224
1225 if (candidates->is_empty()) {
1226 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1227 return false;
1228 }
1229
1230 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1231 size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1232 double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1233 double threshold = (double) G1HeapWastePercent;
1234 if (reclaimable_percent <= threshold) {
1235 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1236 false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1237 return false;
1238 }
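// Hedged example (default G1HeapWastePercent = 5, assumed heap numbers):
// 80MB reclaimable in a 2GB heap is about 3.9%, below the 5% threshold,
// so mixed gcs would stop here.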