src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
rev 3484 : 7178363: G1: Remove the serial code for PrintGCDetails and make it a special case of the parallel code
Summary: Introduced the WorkerDataArray class.
Reviewed-by: mgerdin
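
The summary above notes that the serial PrintGCDetails path goes away by introducing a WorkerDataArray class: per-phase data is kept once per GC worker thread, and the policy code reads it back as an average or a sum (see the new average_last_*() and sum_last_*() accessors in the New version below). What follows is only a minimal sketch of that idea, not the actual HotSpot class; the name WorkerDataArraySketch and its members are illustrative.

#include <cstddef>
#include <cassert>

// Minimal sketch: one slot per GC worker thread, plus the two reductions
// the policy code needs (sum and average). With a single worker the
// average is just that worker's value, which is how the old serial case
// can become a special case of the parallel code.
template <typename T>
class WorkerDataArraySketch {
  T*     _data;    // one value per worker
  size_t _length;  // number of workers
public:
  explicit WorkerDataArraySketch(size_t length) : _length(length) {
    assert(length > 0 && "must have at least one worker");
    _data = new T[length]();   // value-initialize all slots to zero
  }
  ~WorkerDataArraySketch() { delete[] _data; }

  void set(size_t worker_i, T value) {
    assert(worker_i < _length && "worker index out of bounds");
    _data[worker_i] = value;
  }

  // Sum over all workers, e.g. for counts such as processed buffers.
  T sum() const {
    T s = T();
    for (size_t i = 0; i < _length; i++) { s += _data[i]; }
    return s;
  }

  // Average over all workers, e.g. for per-phase times like "update RS".
  double average() const {
    return (double) sum() / (double) _length;
  }
};

Under that sketch, an accessor such as average_last_update_rs_time() would just call average() on the array holding that phase's per-worker times.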

----- Old version -----
 780 
 781 void G1CollectorPolicy::record_stop_world_start() {
 782   _stop_world_start = os::elapsedTime();
 783 }
 784 
 785 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
 786                                                       size_t start_used) {
 787   // We only need to do this here as the policy will only be applied
 788   // to the GC we're about to start, so there is no point in calculating this
 789   // every time we calculate / recalculate the target young length.
 790   update_survivors_policy();
 791 
 792   assert(_g1->used() == _g1->recalculate_used(),
 793          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
 794                  _g1->used(), _g1->recalculate_used()));
 795 
 796   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 797   _trace_gen0_time_data.record_start_collection(s_w_t_ms);
 798   _stop_world_start = 0.0;
 799 
 800   phase_times()->_cur_collection_start_sec = start_time_sec;
 801   _cur_collection_pause_used_at_start_bytes = start_used;
 802   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
 803   _pending_cards = _g1->pending_card_num();
 804   _max_pending_cards = _g1->max_pending_card_num();
 805 
 806   _bytes_in_collection_set_before_gc = 0;
 807   _bytes_copied_during_gc = 0;
 808 
 809   YoungList* young_list = _g1->young_list();
 810   _eden_bytes_before_gc = young_list->eden_used_bytes();
 811   _survivor_bytes_before_gc = young_list->survivor_used_bytes();
 812   _capacity_before_gc = _g1->capacity();
 813 
 814   _last_gc_was_young = false;
 815 
 816   // do that for any other surv rate groups
 817   _short_lived_surv_rate_group->stop_adding_regions();
 818   _survivors_age_table.clear();
 819 
 820   assert( verify_young_ages(), "region age verification" );


 933     // pause we decided to start a cycle but at the beginning of
 934     // this pause we decided to postpone it. That's OK.
 935     set_initiate_conc_mark_if_possible();
 936   }
 937 
 938   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
 939                           end_time_sec, false);
 940 
 941   size_t freed_bytes =
 942     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
 943   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
 944 
 945   double survival_fraction =
 946     (double)surviving_bytes/
 947     (double)_collection_set_bytes_used_before;
 948 
 949   if (update_stats) {
 950     _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
 951     // this is where we update the allocation rate of the application
 952     double app_time_ms =
 953       (phase_times()->_cur_collection_start_sec * 1000.0 - _prev_collection_pause_end_ms);
 954     if (app_time_ms < MIN_TIMER_GRANULARITY) {
 955       // This usually happens due to the timer not having the required
 956       // granularity. Some Linuxes are the usual culprits.
 957       // We'll just set it to something (arbitrarily) small.
 958       app_time_ms = 1.0;
 959     }
 960     // We maintain the invariant that all objects allocated by mutator
 961     // threads will be allocated out of eden regions. So, we can use
 962     // the eden region number allocated since the previous GC to
 963     // calculate the application's allocate rate. The only exception
 964     // to that is humongous objects that are allocated separately. But
 965     // given that humongous object allocations do not really affect
 966     // either the pause's duration or when the next pause will take
 967     // place, we can safely ignore them here.
 968     uint regions_allocated = eden_cset_region_length();
 969     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
 970     _alloc_rate_ms_seq->add(alloc_rate_ms);
 971 
 972     double interval_ms =
 973       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;


1027     // mixed GCs or not.
1028 
1029     if (!next_gc_should_be_mixed("continue mixed GCs",
1030                                  "do not continue mixed GCs")) {
1031       set_gcs_are_young(true);
1032     }
1033   }
1034 
1035   _short_lived_surv_rate_group->start_adding_regions();
 1036   // do that for any other surv rate groups
1037 
1038   if (update_stats) {
1039     size_t diff = 0;
1040     if (_max_pending_cards >= _pending_cards) {
1041       diff = _max_pending_cards - _pending_cards;
1042     }
1043     _pending_card_diff_seq->add((double) diff);
1044 
1045     double cost_per_card_ms = 0.0;
1046     if (_pending_cards > 0) {
1047       cost_per_card_ms = phase_times()->_update_rs_time / (double) _pending_cards;
1048       _cost_per_card_ms_seq->add(cost_per_card_ms);
1049     }
1050 
1051     size_t cards_scanned = _g1->cards_scanned();
1052 
1053     double cost_per_entry_ms = 0.0;
1054     if (cards_scanned > 10) {
1055       cost_per_entry_ms = phase_times()->_scan_rs_time / (double) cards_scanned;
1056       if (_last_gc_was_young) {
1057         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1058       } else {
1059         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1060       }
1061     }
1062 
1063     if (_max_rs_lengths > 0) {
1064       double cards_per_entry_ratio =
1065         (double) cards_scanned / (double) _max_rs_lengths;
1066       if (_last_gc_was_young) {
1067         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1068       } else {
1069         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1070       }
1071     }
1072 
1073     // This is defensive. For a while _max_rs_lengths could get
1074     // smaller than _recorded_rs_lengths which was causing
1075     // rs_length_diff to get very large and mess up the RSet length
1076     // predictions. The reason was unsafe concurrent updates to the
1077     // _inc_cset_recorded_rs_lengths field which the code below guards
1078     // against (see CR 7118202). This bug has now been fixed (see CR
1079     // 7119027). However, I'm still worried that
1080     // _inc_cset_recorded_rs_lengths might still end up somewhat
1081     // inaccurate. The concurrent refinement thread calculates an
1082     // RSet's length concurrently with other CR threads updating it
1083     // which might cause it to calculate the length incorrectly (if,
1084     // say, it's in mid-coarsening). So I'll leave in the defensive
1085     // conditional below just in case.
1086     size_t rs_length_diff = 0;
1087     if (_max_rs_lengths > _recorded_rs_lengths) {
1088       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1089     }
1090     _rs_length_diff_seq->add((double) rs_length_diff);
1091 
1092     size_t copied_bytes = surviving_bytes;
1093     double cost_per_byte_ms = 0.0;
1094     if (copied_bytes > 0) {
1095       cost_per_byte_ms = phase_times()->_obj_copy_time / (double) copied_bytes;
1096       if (_in_marking_window) {
1097         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1098       } else {
1099         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1100       }
1101     }
1102 
1103     double all_other_time_ms = pause_time_ms -
1104       (phase_times()->_update_rs_time + phase_times()->_scan_rs_time + phase_times()->_obj_copy_time + phase_times()->_termination_time);

1105 
1106     double young_other_time_ms = 0.0;
1107     if (young_cset_region_length() > 0) {
1108       young_other_time_ms =
1109         phase_times()->_recorded_young_cset_choice_time_ms +
1110         phase_times()->_recorded_young_free_cset_time_ms;
1111       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1112                                           (double) young_cset_region_length());
1113     }
1114     double non_young_other_time_ms = 0.0;
1115     if (old_cset_region_length() > 0) {
1116       non_young_other_time_ms =
1117         phase_times()->_recorded_non_young_cset_choice_time_ms +
1118         phase_times()->_recorded_non_young_free_cset_time_ms;
1119 
1120       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1121                                             (double) old_cset_region_length());
1122     }
1123 
1124     double constant_other_time_ms = all_other_time_ms -
1125       (young_other_time_ms + non_young_other_time_ms);
1126     _constant_other_time_ms_seq->add(constant_other_time_ms);
1127 
1128     double survival_ratio = 0.0;
1129     if (_bytes_in_collection_set_before_gc > 0) {
1130       survival_ratio = (double) _bytes_copied_during_gc /
1131                                    (double) _bytes_in_collection_set_before_gc;
1132     }
1133 
1134     _pending_cards_seq->add((double) _pending_cards);
1135     _rs_lengths_seq->add((double) _max_rs_lengths);
1136   }
1137 
1138   _in_marking_window = new_in_marking_window;
1139   _in_marking_window_im = new_in_marking_window_im;
1140   _free_regions_at_end_of_collection = _g1->free_regions();
1141   update_young_list_target_length();
1142 
1143   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1144   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1145   adjust_concurrent_refinement(phase_times()->_update_rs_time, phase_times()->_update_rs_processed_buffers, update_rs_time_goal_ms);

1146 
1147   _collectionSetChooser->verify();
1148 }
1149 
1150 #define EXT_SIZE_FORMAT "%.1f%s"
1151 #define EXT_SIZE_PARAMS(bytes)                                  \
1152   byte_size_in_proper_unit((double)(bytes)),                    \
1153   proper_unit_for_byte_size((bytes))
1154 
1155 void G1CollectorPolicy::print_heap_transition() {
1156   if (G1Log::finer()) {
1157     YoungList* young_list = _g1->young_list();
1158     size_t eden_bytes = young_list->eden_used_bytes();
1159     size_t survivor_bytes = young_list->survivor_used_bytes();
1160     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
1161     size_t used = _g1->used();
1162     size_t capacity = _g1->capacity();
1163     size_t eden_capacity =
1164       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
1165 


1934   young_list->clear_survivors();
1935 
1936   _collection_set = _inc_cset_head;
1937   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
1938   time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
1939   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
1940 
1941   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
1942                 "add young regions to CSet",
1943                 ergo_format_region("eden")
1944                 ergo_format_region("survivors")
1945                 ergo_format_ms("predicted young region time"),
1946                 eden_region_length, survivor_region_length,
1947                 _inc_cset_predicted_elapsed_time_ms);
1948 
1949   // The number of recorded young regions is the incremental
1950   // collection set's current size
1951   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1952 
1953   double young_end_time_sec = os::elapsedTime();
1954   phase_times()->_recorded_young_cset_choice_time_ms =
1955     (young_end_time_sec - young_start_time_sec) * 1000.0;
1956 
1957   // We are doing young collections so reset this.
1958   non_young_start_time_sec = young_end_time_sec;
1959 
1960   if (!gcs_are_young()) {
1961     CollectionSetChooser* cset_chooser = _collectionSetChooser;
1962     cset_chooser->verify();
1963     const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
1964     const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();
1965 
1966     uint expensive_region_num = 0;
1967     bool check_time_remaining = adaptive_young_list_length();
1968     HeapRegion* hr = cset_chooser->peek();
1969     while (hr != NULL) {
1970       if (old_cset_region_length() >= max_old_cset_length) {
1971         // Added maximum number of old regions to the CSet.
1972         ergo_verbose2(ErgoCSetConstruction,
1973                       "finish adding old regions to CSet",
1974                       ergo_format_reason("old CSet region num reached max")
1975                       ergo_format_region("old")


2050 
2051     cset_chooser->verify();
2052   }
2053 
2054   stop_incremental_cset_building();
2055 
2056   count_CS_bytes_used();
2057 
2058   ergo_verbose5(ErgoCSetConstruction,
2059                 "finish choosing CSet",
2060                 ergo_format_region("eden")
2061                 ergo_format_region("survivors")
2062                 ergo_format_region("old")
2063                 ergo_format_ms("predicted pause time")
2064                 ergo_format_ms("target pause time"),
2065                 eden_region_length, survivor_region_length,
2066                 old_cset_region_length(),
2067                 predicted_pause_time_ms, target_pause_time_ms);
2068 
2069   double non_young_end_time_sec = os::elapsedTime();
2070   phase_times()->_recorded_non_young_cset_choice_time_ms =
2071     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
2072 }
2073 
2074 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
2075   if(TraceGen0Time) {
2076     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2077   }
2078 }
2079 
2080 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
2081   if(TraceGen0Time) {
2082     _all_yield_times_ms.add(yield_time_ms);
2083   }
2084 }
2085 
2086 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2087   if(TraceGen0Time) {
2088     _total.add(pause_time_ms);
2089     _other.add(pause_time_ms - phase_times->accounted_time_ms());
2090     _root_region_scan_wait.add(phase_times->_root_region_scan_wait_time_ms);
2091     _parallel.add(phase_times->_cur_collection_par_time_ms);
2092     _ext_root_scan.add(phase_times->_ext_root_scan_time);
2093     _satb_filtering.add(phase_times->_satb_filtering_time);
2094     _update_rs.add(phase_times->_update_rs_time);
2095     _scan_rs.add(phase_times->_scan_rs_time);
2096     _obj_copy.add(phase_times->_obj_copy_time);
2097     _termination.add(phase_times->_termination_time);
2098 
2099     double parallel_known_time = phase_times->_ext_root_scan_time +
2100       phase_times->_satb_filtering_time +
2101       phase_times->_update_rs_time +
2102       phase_times->_scan_rs_time +
2103       phase_times->_obj_copy_time +
 2104       phase_times->_termination_time;
2105 
2106     double parallel_other_time = phase_times->_cur_collection_par_time_ms - parallel_known_time;
2107     _parallel_other.add(parallel_other_time);
2108     _clear_ct.add(phase_times->_cur_clear_ct_time_ms);
2109   }
2110 }
2111 
2112 void TraceGen0TimeData::increment_young_collection_count() {
2113   if(TraceGen0Time) {
2114     ++_young_pause_num;
2115   }
2116 }
2117 
2118 void TraceGen0TimeData::increment_mixed_collection_count() {
2119   if(TraceGen0Time) {
2120     ++_mixed_pause_num;
2121   }
2122 }
2123 
2124 void TraceGen0TimeData::print_summary(const char* str,
2125                                       const NumberSeq* seq) const {
2126   double sum = seq->sum();
2127   gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2128                 str, sum / 1000.0, seq->avg());


----- New version -----

 780 
 781 void G1CollectorPolicy::record_stop_world_start() {
 782   _stop_world_start = os::elapsedTime();
 783 }
 784 
 785 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
 786                                                       size_t start_used) {
 787   // We only need to do this here as the policy will only be applied
 788   // to the GC we're about to start, so there is no point in calculating this
 789   // every time we calculate / recalculate the target young length.
 790   update_survivors_policy();
 791 
 792   assert(_g1->used() == _g1->recalculate_used(),
 793          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
 794                  _g1->used(), _g1->recalculate_used()));
 795 
 796   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 797   _trace_gen0_time_data.record_start_collection(s_w_t_ms);
 798   _stop_world_start = 0.0;
 799 
 800   phase_times()->record_cur_collection_start_sec(start_time_sec);
 801   _cur_collection_pause_used_at_start_bytes = start_used;
 802   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
 803   _pending_cards = _g1->pending_card_num();
 804   _max_pending_cards = _g1->max_pending_card_num();
 805 
 806   _bytes_in_collection_set_before_gc = 0;
 807   _bytes_copied_during_gc = 0;
 808 
 809   YoungList* young_list = _g1->young_list();
 810   _eden_bytes_before_gc = young_list->eden_used_bytes();
 811   _survivor_bytes_before_gc = young_list->survivor_used_bytes();
 812   _capacity_before_gc = _g1->capacity();
 813 
 814   _last_gc_was_young = false;
 815 
 816   // do that for any other surv rate groups
 817   _short_lived_surv_rate_group->stop_adding_regions();
 818   _survivors_age_table.clear();
 819 
 820   assert( verify_young_ages(), "region age verification" );


 933     // pause we decided to start a cycle but at the beginning of
 934     // this pause we decided to postpone it. That's OK.
 935     set_initiate_conc_mark_if_possible();
 936   }
 937 
 938   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
 939                           end_time_sec, false);
 940 
 941   size_t freed_bytes =
 942     _cur_collection_pause_used_at_start_bytes - cur_used_bytes;
 943   size_t surviving_bytes = _collection_set_bytes_used_before - freed_bytes;
 944 
 945   double survival_fraction =
 946     (double)surviving_bytes/
 947     (double)_collection_set_bytes_used_before;
 948 
 949   if (update_stats) {
 950     _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
 951     // this is where we update the allocation rate of the application
 952     double app_time_ms =
 953       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 954     if (app_time_ms < MIN_TIMER_GRANULARITY) {
 955       // This usually happens due to the timer not having the required
 956       // granularity. Some Linuxes are the usual culprits.
 957       // We'll just set it to something (arbitrarily) small.
 958       app_time_ms = 1.0;
 959     }
 960     // We maintain the invariant that all objects allocated by mutator
 961     // threads will be allocated out of eden regions. So, we can use
 962     // the eden region number allocated since the previous GC to
 963     // calculate the application's allocate rate. The only exception
 964     // to that is humongous objects that are allocated separately. But
 965     // given that humongous object allocations do not really affect
 966     // either the pause's duration or when the next pause will take
 967     // place, we can safely ignore them here.
 968     uint regions_allocated = eden_cset_region_length();
 969     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
 970     _alloc_rate_ms_seq->add(alloc_rate_ms);
 971 
 972     double interval_ms =
 973       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;


1027     // mixed GCs or not.
1028 
1029     if (!next_gc_should_be_mixed("continue mixed GCs",
1030                                  "do not continue mixed GCs")) {
1031       set_gcs_are_young(true);
1032     }
1033   }
1034 
1035   _short_lived_surv_rate_group->start_adding_regions();
 1036   // do that for any other surv rate groups
1037 
1038   if (update_stats) {
1039     size_t diff = 0;
1040     if (_max_pending_cards >= _pending_cards) {
1041       diff = _max_pending_cards - _pending_cards;
1042     }
1043     _pending_card_diff_seq->add((double) diff);
1044 
1045     double cost_per_card_ms = 0.0;
1046     if (_pending_cards > 0) {
1047       cost_per_card_ms = phase_times()->average_last_update_rs_time() / (double) _pending_cards;
1048       _cost_per_card_ms_seq->add(cost_per_card_ms);
1049     }
1050 
1051     size_t cards_scanned = _g1->cards_scanned();
1052 
1053     double cost_per_entry_ms = 0.0;
1054     if (cards_scanned > 10) {
1055       cost_per_entry_ms = phase_times()->average_last_scan_rs_time() / (double) cards_scanned;
1056       if (_last_gc_was_young) {
1057         _cost_per_entry_ms_seq->add(cost_per_entry_ms);
1058       } else {
1059         _mixed_cost_per_entry_ms_seq->add(cost_per_entry_ms);
1060       }
1061     }
1062 
1063     if (_max_rs_lengths > 0) {
1064       double cards_per_entry_ratio =
1065         (double) cards_scanned / (double) _max_rs_lengths;
1066       if (_last_gc_was_young) {
1067         _young_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1068       } else {
1069         _mixed_cards_per_entry_ratio_seq->add(cards_per_entry_ratio);
1070       }
1071     }
1072 
1073     // This is defensive. For a while _max_rs_lengths could get
1074     // smaller than _recorded_rs_lengths which was causing
1075     // rs_length_diff to get very large and mess up the RSet length
1076     // predictions. The reason was unsafe concurrent updates to the
1077     // _inc_cset_recorded_rs_lengths field which the code below guards
1078     // against (see CR 7118202). This bug has now been fixed (see CR
1079     // 7119027). However, I'm still worried that
1080     // _inc_cset_recorded_rs_lengths might still end up somewhat
1081     // inaccurate. The concurrent refinement thread calculates an
1082     // RSet's length concurrently with other CR threads updating it
1083     // which might cause it to calculate the length incorrectly (if,
1084     // say, it's in mid-coarsening). So I'll leave in the defensive
1085     // conditional below just in case.
1086     size_t rs_length_diff = 0;
1087     if (_max_rs_lengths > _recorded_rs_lengths) {
1088       rs_length_diff = _max_rs_lengths - _recorded_rs_lengths;
1089     }
1090     _rs_length_diff_seq->add((double) rs_length_diff);
1091 
1092     size_t copied_bytes = surviving_bytes;
1093     double cost_per_byte_ms = 0.0;
1094     if (copied_bytes > 0) {
1095       cost_per_byte_ms = phase_times()->average_last_obj_copy_time() / (double) copied_bytes;
1096       if (_in_marking_window) {
1097         _cost_per_byte_ms_during_cm_seq->add(cost_per_byte_ms);
1098       } else {
1099         _cost_per_byte_ms_seq->add(cost_per_byte_ms);
1100       }
1101     }
1102 
1103     double all_other_time_ms = pause_time_ms -
1104       (phase_times()->average_last_update_rs_time() + phase_times()->average_last_scan_rs_time()
1105       + phase_times()->average_last_obj_copy_time() + phase_times()->average_last_termination_time());
1106 
1107     double young_other_time_ms = 0.0;
1108     if (young_cset_region_length() > 0) {
1109       young_other_time_ms =
1110         phase_times()->young_cset_choice_time_ms() +
1111         phase_times()->young_free_cset_time_ms();
1112       _young_other_cost_per_region_ms_seq->add(young_other_time_ms /
1113                                           (double) young_cset_region_length());
1114     }
1115     double non_young_other_time_ms = 0.0;
1116     if (old_cset_region_length() > 0) {
1117       non_young_other_time_ms =
1118         phase_times()->non_young_cset_choice_time_ms() +
1119         phase_times()->non_young_free_cset_time_ms();
1120 
1121       _non_young_other_cost_per_region_ms_seq->add(non_young_other_time_ms /
1122                                             (double) old_cset_region_length());
1123     }
1124 
1125     double constant_other_time_ms = all_other_time_ms -
1126       (young_other_time_ms + non_young_other_time_ms);
1127     _constant_other_time_ms_seq->add(constant_other_time_ms);
1128 
1129     double survival_ratio = 0.0;
1130     if (_bytes_in_collection_set_before_gc > 0) {
1131       survival_ratio = (double) _bytes_copied_during_gc /
1132                                    (double) _bytes_in_collection_set_before_gc;
1133     }
1134 
1135     _pending_cards_seq->add((double) _pending_cards);
1136     _rs_lengths_seq->add((double) _max_rs_lengths);
1137   }
1138 
1139   _in_marking_window = new_in_marking_window;
1140   _in_marking_window_im = new_in_marking_window_im;
1141   _free_regions_at_end_of_collection = _g1->free_regions();
1142   update_young_list_target_length();
1143 
1144   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1145   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1146   adjust_concurrent_refinement(phase_times()->average_last_update_rs_time(),
1147                                phase_times()->sum_last_update_rs_processed_buffers(), update_rs_time_goal_ms);
1148 
1149   _collectionSetChooser->verify();
1150 }
1151 
1152 #define EXT_SIZE_FORMAT "%.1f%s"
1153 #define EXT_SIZE_PARAMS(bytes)                                  \
1154   byte_size_in_proper_unit((double)(bytes)),                    \
1155   proper_unit_for_byte_size((bytes))
1156 
1157 void G1CollectorPolicy::print_heap_transition() {
1158   if (G1Log::finer()) {
1159     YoungList* young_list = _g1->young_list();
1160     size_t eden_bytes = young_list->eden_used_bytes();
1161     size_t survivor_bytes = young_list->survivor_used_bytes();
1162     size_t used_before_gc = _cur_collection_pause_used_at_start_bytes;
1163     size_t used = _g1->used();
1164     size_t capacity = _g1->capacity();
1165     size_t eden_capacity =
1166       (_young_list_target_length * HeapRegion::GrainBytes) - survivor_bytes;
1167 


1936   young_list->clear_survivors();
1937 
1938   _collection_set = _inc_cset_head;
1939   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
1940   time_remaining_ms -= _inc_cset_predicted_elapsed_time_ms;
1941   predicted_pause_time_ms += _inc_cset_predicted_elapsed_time_ms;
1942 
1943   ergo_verbose3(ErgoCSetConstruction | ErgoHigh,
1944                 "add young regions to CSet",
1945                 ergo_format_region("eden")
1946                 ergo_format_region("survivors")
1947                 ergo_format_ms("predicted young region time"),
1948                 eden_region_length, survivor_region_length,
1949                 _inc_cset_predicted_elapsed_time_ms);
1950 
1951   // The number of recorded young regions is the incremental
1952   // collection set's current size
1953   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1954 
1955   double young_end_time_sec = os::elapsedTime();
1956   phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

1957 
1958   // We are doing young collections so reset this.
1959   non_young_start_time_sec = young_end_time_sec;
1960 
1961   if (!gcs_are_young()) {
1962     CollectionSetChooser* cset_chooser = _collectionSetChooser;
1963     cset_chooser->verify();
1964     const uint min_old_cset_length = cset_chooser->calc_min_old_cset_length();
1965     const uint max_old_cset_length = cset_chooser->calc_max_old_cset_length();
1966 
1967     uint expensive_region_num = 0;
1968     bool check_time_remaining = adaptive_young_list_length();
1969     HeapRegion* hr = cset_chooser->peek();
1970     while (hr != NULL) {
1971       if (old_cset_region_length() >= max_old_cset_length) {
1972         // Added maximum number of old regions to the CSet.
1973         ergo_verbose2(ErgoCSetConstruction,
1974                       "finish adding old regions to CSet",
1975                       ergo_format_reason("old CSet region num reached max")
1976                       ergo_format_region("old")


2051 
2052     cset_chooser->verify();
2053   }
2054 
2055   stop_incremental_cset_building();
2056 
2057   count_CS_bytes_used();
2058 
2059   ergo_verbose5(ErgoCSetConstruction,
2060                 "finish choosing CSet",
2061                 ergo_format_region("eden")
2062                 ergo_format_region("survivors")
2063                 ergo_format_region("old")
2064                 ergo_format_ms("predicted pause time")
2065                 ergo_format_ms("target pause time"),
2066                 eden_region_length, survivor_region_length,
2067                 old_cset_region_length(),
2068                 predicted_pause_time_ms, target_pause_time_ms);
2069 
2070   double non_young_end_time_sec = os::elapsedTime();
2071   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);

2072 }
2073 
2074 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
2075   if(TraceGen0Time) {
2076     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2077   }
2078 }
2079 
2080 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
2081   if(TraceGen0Time) {
2082     _all_yield_times_ms.add(yield_time_ms);
2083   }
2084 }
2085 
2086 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2087   if(TraceGen0Time) {
2088     _total.add(pause_time_ms);
2089     _other.add(pause_time_ms - phase_times->accounted_time_ms());
2090     _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2091     _parallel.add(phase_times->cur_collection_par_time_ms());
2092     _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
2093     _satb_filtering.add(phase_times->sum_last_satb_filtering_times_ms());
2094     _update_rs.add(phase_times->average_last_update_rs_time());
2095     _scan_rs.add(phase_times->average_last_scan_rs_time());
2096     _obj_copy.add(phase_times->average_last_obj_copy_time());
2097     _termination.add(phase_times->average_last_termination_time());
2098 
2099     double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
2100       phase_times->sum_last_satb_filtering_times_ms() +
2101       phase_times->average_last_update_rs_time() +
2102       phase_times->average_last_scan_rs_time() +
2103       phase_times->average_last_obj_copy_time() +
 2104       phase_times->average_last_termination_time();
2105 
2106     double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
2107     _parallel_other.add(parallel_other_time);
2108     _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2109   }
2110 }
2111 
2112 void TraceGen0TimeData::increment_young_collection_count() {
2113   if(TraceGen0Time) {
2114     ++_young_pause_num;
2115   }
2116 }
2117 
2118 void TraceGen0TimeData::increment_mixed_collection_count() {
2119   if(TraceGen0Time) {
2120     ++_mixed_pause_num;
2121   }
2122 }
2123 
2124 void TraceGen0TimeData::print_summary(const char* str,
2125                                       const NumberSeq* seq) const {
2126   double sum = seq->sum();
2127   gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2128                 str, sum / 1000.0, seq->avg());

