< prev index next >

src/hotspot/share/gc/g1/g1Policy.cpp

Print this page
rev 60594 : [mq]: 8240556-abort-conc-mark-new


 646   update_survivors_policy();
 647 
 648   assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
 649          "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
 650          max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
 651   assert_used_and_recalculate_used_equal(_g1h);
 652 
 653   phase_times()->record_cur_collection_start_sec(start_time_sec);
 654 
 655   record_concurrent_refinement_stats();
 656 
 657   _collection_set->reset_bytes_used_before();
 658 
 659   // do that for any other surv rate groups
 660   _eden_surv_rate_group->stop_adding_regions();
 661   _survivors_age_table.clear();
 662 
 663   assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
 664 }
 665 
// Finish bookkeeping for the pause that initiated concurrent marking:
// clear the "in concurrent start GC" collector state.
// NOTE(review): mark_init_elapsed_time_ms is unused here — consider removing
// the parameter (callers would drop the dummy 0.0 argument).
void G1Policy::record_concurrent_mark_init_end(double mark_init_elapsed_time_ms) {
  // The request to initiate marking must already have been consumed.
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_in_concurrent_start_gc(false);
}
 670 
// Record the wall-clock start time of the concurrent mark Remark pause so its
// duration can be computed in record_concurrent_mark_remark_end().
void G1Policy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
}
 674 
 675 void G1Policy::record_concurrent_mark_remark_end() {
 676   double end_time_sec = os::elapsedTime();
 677   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 678   _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
 679   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
 680 
 681   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 682 }
 683 
// Record the wall-clock start time of the concurrent mark Cleanup pause.
void G1Policy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}


 731   }
 732 
 733   return result;
 734 }
 735 
 736 double G1Policy::logged_cards_processing_time() const {
 737   double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
 738   size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
 739   size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
 740                                  phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
 741   // This may happen if there are duplicate cards in different log buffers.
 742   if (logged_dirty_cards > scan_heap_roots_cards) {
 743     return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
 744   }
 745   return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
 746 }
 747 
 748 // Anything below that is considered to be zero
 749 #define MIN_TIMER_GRANULARITY 0.0000001
 750 
 751 void G1Policy::record_collection_pause_end(double pause_time_ms) {
 752   G1GCPhaseTimes* p = phase_times();
 753 
 754   double end_time_sec = os::elapsedTime();
 755 
 756   PauseKind this_pause = young_gc_pause_kind();
 757 
 758   bool update_stats = !_g1h->evacuation_failed();
 759 
 760   record_pause(this_pause, end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 761 
 762   if (is_concurrent_start_pause(this_pause)) {
 763     record_concurrent_mark_init_end(0.0);
 764   } else {
 765     maybe_start_marking();
 766   }
 767 
 768   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
 769   if (app_time_ms < MIN_TIMER_GRANULARITY) {
 770     // This usually happens due to the timer not having the required
 771     // granularity. Some Linuxes are the usual culprits.
 772     // We'll just set it to something (arbitrarily) small.
 773     app_time_ms = 1.0;
 774   }
 775 
 776   if (update_stats) {
 777     // We maintain the invariant that all objects allocated by mutator
 778     // threads will be allocated out of eden regions. So, we can use
 779     // the eden region number allocated since the previous GC to
 780     // calculate the application's allocate rate. The only exception
 781     // to that is humongous objects that are allocated separately. But
 782     // given that humongous object allocations do not really affect
 783     // either the pause's duration nor when the next pause will take


 883     if (_collection_set->old_region_length() > 0) {
 884       _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
 885                                                             _collection_set->old_region_length());
 886     }
 887 
 888     _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
 889 
 890     // Do not update RS lengths and the number of pending cards with information from mixed gc:
 891     // these are is wildly different to during young only gc and mess up young gen sizing right
 892     // after the mixed gc phase.
 893     // During mixed gc we do not use them for young gen sizing.
 894     if (is_young_only_pause(this_pause)) {
 895       _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
 896       _analytics->report_rs_length((double) _rs_length);
 897     }
 898   }
 899 
 900   assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
 901          "If the last pause has been concurrent start, we should not have been in the marking window");
 902   if (is_concurrent_start_pause(this_pause)) {
 903     collector_state()->set_mark_or_rebuild_in_progress(true);



 904   }
 905 
 906   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 907 
 908   update_rs_length_prediction();
 909 
 910   // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
 911   // that in this case we are not running in a "normal" operating mode.
 912   if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
 913     update_young_length_bounds();
 914 
 915     _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
 916     update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
 917                            _old_gen_alloc_tracker.last_cycle_old_bytes(),
 918                            is_young_only_pause(this_pause));
 919 
 920     _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
 921   } else {
 922     // Any garbage collection triggered as periodic collection resets the time-to-mixed
 923     // measurement. Periodic collection typically means that the application is "inactive", i.e.
 924     // the marking threads may have received an uncharacterisic amount of cpu time
 925     // for completing the marking, i.e. are faster than expected.
 926     // This skews the predicted marking length towards smaller values which might cause
 927     // the mark start being too late.
 928     _concurrent_start_to_mixed.reset();
 929   }
 930 
 931   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
 932   double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 933 
 934   if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
 935     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
 936                                 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
 937                                 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
 938 
 939     scan_logged_cards_time_goal_ms = 0;
 940   } else {
 941     scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
 942   }
 943 
 944   double const logged_cards_time = logged_cards_processing_time();
 945 
 946   log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
 947                               scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
 948 




 646   update_survivors_policy();
 647 
 648   assert(max_survivor_regions() + _g1h->num_used_regions() <= _g1h->max_regions(),
 649          "Maximum survivor regions %u plus used regions %u exceeds max regions %u",
 650          max_survivor_regions(), _g1h->num_used_regions(), _g1h->max_regions());
 651   assert_used_and_recalculate_used_equal(_g1h);
 652 
 653   phase_times()->record_cur_collection_start_sec(start_time_sec);
 654 
 655   record_concurrent_refinement_stats();
 656 
 657   _collection_set->reset_bytes_used_before();
 658 
 659   // do that for any other surv rate groups
 660   _eden_surv_rate_group->stop_adding_regions();
 661   _survivors_age_table.clear();
 662 
 663   assert(_g1h->collection_set()->verify_young_ages(), "region age verification failed");
 664 }
 665 
// Finish bookkeeping for the pause that initiated concurrent marking:
// clear the "in concurrent start GC" collector state.
void G1Policy::record_concurrent_mark_init_end() {
  // The request to initiate marking must already have been consumed.
  assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
  collector_state()->set_in_concurrent_start_gc(false);
}
 670 
// Record the wall-clock start time of the concurrent mark Remark pause so its
// duration can be computed in record_concurrent_mark_remark_end().
void G1Policy::record_concurrent_mark_remark_start() {
  _mark_remark_start_sec = os::elapsedTime();
}
 674 
 675 void G1Policy::record_concurrent_mark_remark_end() {
 676   double end_time_sec = os::elapsedTime();
 677   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 678   _analytics->report_concurrent_mark_remark_times_ms(elapsed_time_ms);
 679   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
 680 
 681   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 682 }
 683 
// Record the wall-clock start time of the concurrent mark Cleanup pause.
void G1Policy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}


 731   }
 732 
 733   return result;
 734 }
 735 
 736 double G1Policy::logged_cards_processing_time() const {
 737   double all_cards_processing_time = average_time_ms(G1GCPhaseTimes::ScanHR) + average_time_ms(G1GCPhaseTimes::OptScanHR);
 738   size_t logged_dirty_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::MergeLB, G1GCPhaseTimes::MergeLBDirtyCards);
 739   size_t scan_heap_roots_cards = phase_times()->sum_thread_work_items(G1GCPhaseTimes::ScanHR, G1GCPhaseTimes::ScanHRScannedCards) +
 740                                  phase_times()->sum_thread_work_items(G1GCPhaseTimes::OptScanHR, G1GCPhaseTimes::ScanHRScannedCards);
 741   // This may happen if there are duplicate cards in different log buffers.
 742   if (logged_dirty_cards > scan_heap_roots_cards) {
 743     return all_cards_processing_time + average_time_ms(G1GCPhaseTimes::MergeLB);
 744   }
 745   return (all_cards_processing_time * logged_dirty_cards / scan_heap_roots_cards) + average_time_ms(G1GCPhaseTimes::MergeLB);
 746 }
 747 
 748 // Anything below that is considered to be zero
 749 #define MIN_TIMER_GRANULARITY 0.0000001
 750 
 751 void G1Policy::record_collection_pause_end(double pause_time_ms, bool start_concurrent_mark_cycle) {
 752   G1GCPhaseTimes* p = phase_times();
 753 
 754   double end_time_sec = os::elapsedTime();
 755 
 756   PauseKind this_pause = young_gc_pause_kind();
 757 
 758   bool update_stats = !_g1h->evacuation_failed();
 759 
 760   record_pause(this_pause, end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 761 
 762   if (is_concurrent_start_pause(this_pause)) {
 763     record_concurrent_mark_init_end();
 764   } else {
 765     maybe_start_marking();
 766   }
 767 
 768   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
 769   if (app_time_ms < MIN_TIMER_GRANULARITY) {
 770     // This usually happens due to the timer not having the required
 771     // granularity. Some Linuxes are the usual culprits.
 772     // We'll just set it to something (arbitrarily) small.
 773     app_time_ms = 1.0;
 774   }
 775 
 776   if (update_stats) {
 777     // We maintain the invariant that all objects allocated by mutator
 778     // threads will be allocated out of eden regions. So, we can use
 779     // the eden region number allocated since the previous GC to
 780     // calculate the application's allocate rate. The only exception
 781     // to that is humongous objects that are allocated separately. But
 782     // given that humongous object allocations do not really affect
 783     // either the pause's duration nor when the next pause will take


 883     if (_collection_set->old_region_length() > 0) {
 884       _analytics->report_non_young_other_cost_per_region_ms(non_young_other_time_ms() /
 885                                                             _collection_set->old_region_length());
 886     }
 887 
 888     _analytics->report_constant_other_time_ms(constant_other_time_ms(pause_time_ms));
 889 
 890     // Do not update RS lengths and the number of pending cards with information from mixed gc:
 891     // these are is wildly different to during young only gc and mess up young gen sizing right
 892     // after the mixed gc phase.
 893     // During mixed gc we do not use them for young gen sizing.
 894     if (is_young_only_pause(this_pause)) {
 895       _analytics->report_pending_cards((double) _pending_cards_at_gc_start);
 896       _analytics->report_rs_length((double) _rs_length);
 897     }
 898   }
 899 
 900   assert(!(is_concurrent_start_pause(this_pause) && collector_state()->mark_or_rebuild_in_progress()),
 901          "If the last pause has been concurrent start, we should not have been in the marking window");
 902   if (is_concurrent_start_pause(this_pause)) {
 903     if (!start_concurrent_mark_cycle) {
 904       abort_time_to_mixed_tracking();
 905     }
 906     collector_state()->set_mark_or_rebuild_in_progress(start_concurrent_mark_cycle);
 907   }
 908 
 909   _free_regions_at_end_of_collection = _g1h->num_free_regions();
 910 
 911   update_rs_length_prediction();
 912 
 913   // Do not update dynamic IHOP due to G1 periodic collection as it is highly likely
 914   // that in this case we are not running in a "normal" operating mode.
 915   if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
 916     update_young_length_bounds();
 917 
 918     _old_gen_alloc_tracker.reset_after_young_gc(app_time_ms / 1000.0);
 919     update_ihop_prediction(_old_gen_alloc_tracker.last_cycle_duration(),
 920                            _old_gen_alloc_tracker.last_cycle_old_bytes(),
 921                            is_young_only_pause(this_pause));
 922 
 923     _ihop_control->send_trace_event(_g1h->gc_tracer_stw());
 924   } else {
 925     // Any garbage collection triggered as periodic collection resets the time-to-mixed
 926     // measurement. Periodic collection typically means that the application is "inactive", i.e.
 927     // the marking threads may have received an uncharacterisic amount of cpu time
 928     // for completing the marking, i.e. are faster than expected.
 929     // This skews the predicted marking length towards smaller values which might cause
 930     // the mark start being too late.
 931     abort_time_to_mixed_tracking();
 932   }
 933 
 934   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
 935   double scan_logged_cards_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
 936 
 937   if (scan_logged_cards_time_goal_ms < merge_hcc_time_ms) {
 938     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
 939                                 "Logged Cards Scan time goal: %1.2fms Scan HCC time: %1.2fms",
 940                                 scan_logged_cards_time_goal_ms, merge_hcc_time_ms);
 941 
 942     scan_logged_cards_time_goal_ms = 0;
 943   } else {
 944     scan_logged_cards_time_goal_ms -= merge_hcc_time_ms;
 945   }
 946 
 947   double const logged_cards_time = logged_cards_processing_time();
 948 
 949   log_debug(gc, ergo, refine)("Concurrent refinement times: Logged Cards Scan time goal: %1.2fms Logged Cards Scan time: %1.2fms HCC time: %1.2fms",
 950                               scan_logged_cards_time_goal_ms, logged_cards_time, merge_hcc_time_ms);
 951 


< prev index next >