555 return false;
556 }
557
558 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
559
560 size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
561 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
562 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
563
564 bool result = false;
565 if (marking_request_bytes > marking_initiating_used_threshold) {
566 result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
567 log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
568 result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
569 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
570 }
571
572 return result;
573 }
574
575 double G1Policy::log_buffer_processing_time() const {
576 double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
577 size_t log_buffer_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
578 size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
579 phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
580 // This may happen if there are duplicate cards in different log buffers.
581 if (log_buffer_dirty_cards > scan_heap_roots_cards) {
582 return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
583 }
584 return (all_cards_processing_time * log_buffer_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
585 }
586
// Anything below this value is considered to be zero.
588 #define MIN_TIMER_GRANULARITY 0.0000001
589
// Bookkeeping at the end of an evacuation pause: records pause times, feeds
// the analytics/predictors with the measured per-card costs, decides whether
// to continue the mixed phase, updates the IHOP prediction and re-targets the
// concurrent refinement threads.
void G1Policy::record_collection_pause_end(double pause_time_ms, size_t heap_used_bytes_before_gc) {
  G1GCPhaseTimes* p = phase_times();

  double end_time_sec = os::elapsedTime();

  assert_used_and_recalculate_used_equal(_g1h);
  size_t cur_used_bytes = _g1h->used();
  bool this_pause_included_initial_mark = false;
  bool this_pause_was_young_only = collector_state()->in_young_only_phase();

  // Do not feed the predictors with data from a pause that failed evacuation.
  bool update_stats = !_g1h->evacuation_failed();

  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

  _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
    // Leave the young-only phase and clear the young-GC-before-mixed flag.
    collector_state()->set_in_young_only_phase(false);
    collector_state()->set_in_young_gc_before_mixed(false);
  } else if (!this_pause_was_young_only) {
    // This is a mixed GC. Here we decide whether to continue doing more
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      // No more mixed GCs: go back to the young-only phase, drop the remaining
      // collection set candidates and possibly request a new marking cycle.
      collector_state()->set_in_young_only_phase(true);

      clear_collection_set_candidates();
      maybe_start_marking();
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // Do that for any other surv rate groups

  // Time spent scanning the hot card cache (zero if the HCC is disabled).
  double scan_hcc_time_ms = G1HotCardCache::default_use_cache() ? average_time_ms(G1GCPhaseTimes::MergeHCC) : 0.0;

  if (update_stats) {
    // Average cost of processing one dirty card from the log buffers.
    double cost_per_log_buffer_entry = 0.0;
    size_t const pending_log_buffer_entries = p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
    if (pending_log_buffer_entries > 0) {
      cost_per_log_buffer_entry = log_buffer_processing_time() / pending_log_buffer_entries;
      _analytics->report_cost_per_log_buffer_entry_ms(cost_per_log_buffer_entry);
    }
    _analytics->report_cost_scan_hcc(scan_hcc_time_ms);

    size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
                                       p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
    // Cards scanned because of remembered sets only, i.e. not coming from the
    // log buffers.
    size_t remset_cards_scanned = 0;
    // There might have been duplicate log buffer entries in the queues which could
    // increase this value beyond the cards scanned. In this case attribute all cards
    // to the log buffers.
    if (pending_log_buffer_entries <= total_cards_scanned) {
      remset_cards_scanned = total_cards_scanned - pending_log_buffer_entries;
    }

    double cost_per_remset_card_ms = 0.0;
    // Only report with a meaningful sample size (> 10 cards).
    if (remset_cards_scanned > 10) {
      // Scan time attributed to remset cards plus the time merging the
      // various remembered sets.
      double avg_time_remset_scan = ((average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR)) *
                                     remset_cards_scanned / total_cards_scanned) +
                                    average_time_ms(G1GCPhaseTimes::MergeER) +
                                    average_time_ms(G1GCPhaseTimes::MergeRS) +
                                    average_time_ms(G1GCPhaseTimes::OptMergeRS);

      cost_per_remset_card_ms = avg_time_remset_scan / remset_cards_scanned;
      _analytics->report_cost_per_remset_card_ms(cost_per_remset_card_ms, this_pause_was_young_only);
    }

    if (_max_rs_length > 0) {
      double cards_per_entry_ratio =
        (double) remset_cards_scanned / (double) _max_rs_length;
      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, this_pause_was_young_only);
    }

    // NOTE(review): presumably the young list length without the heap-reserve
    // restraint — confirm against update_young_list_max_and_target_length().
    size_t last_unrestrained_young_length = update_young_list_max_and_target_length();

    update_ihop_prediction(app_time_ms / 1000.0,
                           _bytes_allocated_in_old_since_last_gc,
                           last_unrestrained_young_length * HeapRegion::GrainBytes,
                           this_pause_was_young_only);
    _bytes_allocated_in_old_since_last_gc = 0;

    _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
  } else {
    // Any garbage collection triggered as periodic collection resets the time-to-mixed
    // measurement. Periodic collection typically means that the application is "inactive", i.e.
    // the marking threads may have received an uncharacteristic amount of cpu time
    // for completing the marking, i.e. are faster than expected.
    // This skews the predicted marking length towards smaller values which might cause
    // the mark start being too late.
    _initial_mark_to_mixed.reset();
  }

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  // Budget for log buffer processing in the pause: a fraction
  // (G1RSetUpdatingPauseTimePercent) of the maximum allowed GC time.
  double scan_log_buffer_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

  if (scan_log_buffer_time_goal_ms < scan_hcc_time_ms) {
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
                                "Log Buffer Scan time goal: %1.2fms Scan HCC time: %1.2fms",
                                scan_log_buffer_time_goal_ms, scan_hcc_time_ms);

    // The HCC scan alone consumes the whole budget; nothing left for the log buffers.
    scan_log_buffer_time_goal_ms = 0;
  } else {
    // The HCC scan counts against the budget; subtract it.
    scan_log_buffer_time_goal_ms -= scan_hcc_time_ms;
  }

  double const log_buffer_time = log_buffer_processing_time();

  log_debug(gc, ergo, refine)("Concurrent refinement times: Log Buffer Scan time goal: %1.2fms Log Buffer Scan time: %1.2fms HCC time: %1.2fms",
                              scan_log_buffer_time_goal_ms, log_buffer_time, scan_hcc_time_ms);

  // Re-tune the concurrent refinement thresholds from the actual vs. targeted
  // log buffer processing time.
  _g1h->concurrent_refine()->adjust(log_buffer_time,
                                    phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBProcessedBuffers),
                                    scan_log_buffer_time_goal_ms);
}
810
811 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
812 if (G1UseAdaptiveIHOP) {
813 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
814 predictor,
815 G1ReservePercent,
816 G1HeapWastePercent);
817 } else {
818 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
819 }
820 }
821
822 void G1Policy::update_ihop_prediction(double mutator_time_s,
823 size_t mutator_alloc_bytes,
824 size_t young_gen_size,
825 bool this_gc_was_young_only) {
826 // Always try to update IHOP prediction. Even evacuation failures give information
827 // about e.g. whether to start IHOP earlier next time.
828
|
555 return false;
556 }
557
558 size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
559
560 size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
561 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
562 size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
563
564 bool result = false;
565 if (marking_request_bytes > marking_initiating_used_threshold) {
566 result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
567 log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
568 result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
569 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
570 }
571
572 return result;
573 }
574
575 double G1Policy::logged_cards_processing_time() const {
576 double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
577 size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
578 size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
579 phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
580 // This may happen if there are duplicate cards in different log buffers.
581 if (logged_dirty_cards > scan_heap_roots_cards) {
582 return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
583 }
584 return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
585 }
586
// Anything below this value is considered to be zero.
588 #define MIN_TIMER_GRANULARITY 0.0000001
589
// Bookkeeping at the end of an evacuation pause: records pause times, feeds
// the analytics/predictors with the measured per-card costs, decides whether
// to continue the mixed phase, updates the IHOP prediction and re-targets the
// concurrent refinement threads.
void G1Policy::record_collection_pause_end(double pause_time_ms, size_t heap_used_bytes_before_gc) {
  G1GCPhaseTimes* p = phase_times();

  double end_time_sec = os::elapsedTime();

  assert_used_and_recalculate_used_equal(_g1h);
  size_t cur_used_bytes = _g1h->used();
  bool this_pause_included_initial_mark = false;
  bool this_pause_was_young_only = collector_state()->in_young_only_phase();

  // Do not feed the predictors with data from a pause that failed evacuation.
  bool update_stats = !_g1h->evacuation_failed();

  record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);

  _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
    // Leave the young-only phase and clear the young-GC-before-mixed flag.
    collector_state()->set_in_young_only_phase(false);
    collector_state()->set_in_young_gc_before_mixed(false);
  } else if (!this_pause_was_young_only) {
    // This is a mixed GC. Here we decide whether to continue doing more
    // mixed GCs or not.
    if (!next_gc_should_be_mixed("continue mixed GCs",
                                 "do not continue mixed GCs")) {
      // No more mixed GCs: go back to the young-only phase, drop the remaining
      // collection set candidates and possibly request a new marking cycle.
      collector_state()->set_in_young_only_phase(true);

      clear_collection_set_candidates();
      maybe_start_marking();
    }
  }

  _short_lived_surv_rate_group->start_adding_regions();
  // Do that for any other surv rate groups

  // Time spent scanning the hot card cache (zero if the HCC is disabled).
  double scan_hcc_time_ms = G1HotCardCache::default_use_cache() ? average_time_ms(G1GCPhaseTimes::MergeHCC) : 0.0;

  if (update_stats) {
    // Average cost of processing one logged (dirty) card.
    double cost_per_logged_card = 0.0;
    size_t const pending_logged_cards = p->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
    if (pending_logged_cards > 0) {
      cost_per_logged_card = logged_cards_processing_time() / pending_logged_cards;
      _analytics->report_cost_per_logged_card_ms(cost_per_logged_card);
    }
    _analytics->report_cost_scan_hcc(scan_hcc_time_ms);

    size_t const total_cards_scanned = p->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
                                       p->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
    // Cards scanned because of remembered sets only, i.e. not coming from the
    // log buffers.
    size_t remset_cards_scanned = 0;
    // There might have been duplicate log buffer entries in the queues which could
    // increase this value beyond the cards scanned. In this case attribute all cards
    // to the log buffers.
    if (pending_logged_cards <= total_cards_scanned) {
      remset_cards_scanned = total_cards_scanned - pending_logged_cards;
    }

    double cost_per_remset_card_ms = 0.0;
    // Only report with a meaningful sample size (> 10 cards).
    if (remset_cards_scanned > 10) {
      // Scan time attributed to remset cards plus the time merging the
      // various remembered sets.
      double avg_time_remset_scan = ((average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR)) *
                                     remset_cards_scanned / total_cards_scanned) +
                                    average_time_ms(G1GCPhaseTimes::MergeER) +
                                    average_time_ms(G1GCPhaseTimes::MergeRS) +
                                    average_time_ms(G1GCPhaseTimes::OptMergeRS);

      cost_per_remset_card_ms = avg_time_remset_scan / remset_cards_scanned;
      _analytics->report_cost_per_remset_card_ms(cost_per_remset_card_ms, this_pause_was_young_only);
    }

    if (_max_rs_length > 0) {
      double cards_per_entry_ratio =
        (double) remset_cards_scanned / (double) _max_rs_length;
      _analytics->report_cards_per_entry_ratio(cards_per_entry_ratio, this_pause_was_young_only);
    }

    // NOTE(review): presumably the young list length without the heap-reserve
    // restraint — confirm against update_young_list_max_and_target_length().
    size_t last_unrestrained_young_length = update_young_list_max_and_target_length();

    update_ihop_prediction(app_time_ms / 1000.0,
                           _bytes_allocated_in_old_since_last_gc,
                           last_unrestrained_young_length * HeapRegion::GrainBytes,
                           this_pause_was_young_only);
    _bytes_allocated_in_old_since_last_gc = 0;

    _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
  } else {
    // Any garbage collection triggered as periodic collection resets the time-to-mixed
    // measurement. Periodic collection typically means that the application is "inactive", i.e.
    // the marking threads may have received an uncharacteristic amount of cpu time
    // for completing the marking, i.e. are faster than expected.
    // This skews the predicted marking length towards smaller values which might cause
    // the mark start being too late.
    _initial_mark_to_mixed.reset();
  }

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  // Budget for scanning logged cards in the pause: a fraction
  // (G1RSetUpdatingPauseTimePercent) of the maximum allowed GC time.
  double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

  if (scan_logged_cards_time_goal_ms < scan_hcc_time_ms) {
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
                                "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
                                scan_logged_cards_time_goal_ms, scan_hcc_time_ms);

    // The HCC scan alone consumes the whole budget; nothing left for logged cards.
    scan_logged_cards_time_goal_ms = 0;
  } else {
    // The HCC scan counts against the budget; subtract it.
    scan_logged_cards_time_goal_ms -= scan_hcc_time_ms;
  }

  double const logged_cards_time = logged_cards_processing_time();

  log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
                              scan_logged_cards_time_goal_ms, logged_cards_time, scan_hcc_time_ms);

  // Re-tune the concurrent refinement thresholds from the actual vs. targeted
  // logged cards processing time.
  _g1h->concurrent_refine()->adjust(logged_cards_time,
                                    phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards),
                                    scan_logged_cards_time_goal_ms);
}
810
811 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
812 if (G1UseAdaptiveIHOP) {
813 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
814 predictor,
815 G1ReservePercent,
816 G1HeapWastePercent);
817 } else {
818 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
819 }
820 }
821
822 void G1Policy::update_ihop_prediction(double mutator_time_s,
823 size_t mutator_alloc_bytes,
824 size_t young_gen_size,
825 bool this_gc_was_young_only) {
826 // Always try to update IHOP prediction. Even evacuation failures give information
827 // about e.g. whether to start IHOP earlier next time.
828
|