787 }
788
789 return ret;
790 }
791 #endif // PRODUCT
792
// Note the start time of a Full GC and record heap-size information at
// the start of the collection.
void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start(true /* full */);
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}
799
// Book-keeping done when a Full GC finishes: account its duration and
// reset the young/mixed heuristics so the policy restarts cleanly.
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  // Full GC times are accounted to the gen1 (old) tracing data.
  _trace_gen1_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // No survivors remain after a Full GC.
  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  _survivor_surv_rate_group->reset();
  // Recompute the target young-list length for the post-Full-GC state.
  update_young_list_target_length();
  _collectionSetChooser->clear();
}
833
// Remember when a stop-the-world request was made; read later (by
// record_collection_pause_start / record_concurrent_pause) to compute
// the time spent reaching the safepoint.
void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}
837
// Book-keeping done at the very start of an evacuation pause, before any
// of the pause's real work happens.
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. so, no point is calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  // Sanity: the cached used-bytes value must match a full recalculation.
  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  // Time from the stop-the-world request to actually starting the pause.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_gen0_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start(false /* full */);

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  // Reset the per-pause counters that are accumulated during evacuation.
  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
868
869 void G1CollectorPolicy::record_concurrent_mark_init_end(double
884 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
885 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
886 _cur_mark_stop_world_time_ms += elapsed_time_ms;
887 _prev_collection_pause_end_ms += elapsed_time_ms;
888
889 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
890 }
891
// Note the start time of the concurrent-mark cleanup phase.
void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}
895
// Concurrent-mark cleanup has finished: set _last_young_gc so the
// young/mixed transition heuristics see that marking completed, and note
// that we are no longer inside a marking window.
void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}
900
901 void G1CollectorPolicy::record_concurrent_pause() {
902 if (_stop_world_start > 0.0) {
903 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
904 _trace_gen0_time_data.record_yield_time(yield_ms);
905 }
906 }
907
908 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
909 if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
910 return false;
911 }
912
913 size_t marking_initiating_used_threshold =
914 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
915 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
916 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
917
918 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
919 if (gcs_are_young() && !_last_young_gc) {
920 ergo_verbose5(ErgoConcCycles,
921 "request concurrent cycle initiation",
922 ergo_format_reason("occupancy higher than threshold")
923 ergo_format_byte("occupancy")
924 ergo_format_byte("allocation request")
971 }
972 #endif // PRODUCT
973
974 last_pause_included_initial_mark = during_initial_mark_pause();
975 if (last_pause_included_initial_mark) {
976 record_concurrent_mark_init_end(0.0);
977 } else if (need_to_start_conc_mark("end of GC")) {
978 // Note: this might have already been set, if during the last
979 // pause we decided to start a cycle but at the beginning of
980 // this pause we decided to postpone it. That's OK.
981 set_initiate_conc_mark_if_possible();
982 }
983
984 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
985 end_time_sec, false);
986
987 evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
988 evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
989
990 if (update_stats) {
991 _trace_gen0_time_data.record_end_collection(pause_time_ms, phase_times());
992 // this is where we update the allocation rate of the application
993 double app_time_ms =
994 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
995 if (app_time_ms < MIN_TIMER_GRANULARITY) {
996 // This usually happens due to the timer not having the required
997 // granularity. Some Linuxes are the usual culprits.
998 // We'll just set it to something (arbitrarily) small.
999 app_time_ms = 1.0;
1000 }
1001 // We maintain the invariant that all objects allocated by mutator
1002 // threads will be allocated out of eden regions. So, we can use
1003 // the eden region number allocated since the previous GC to
1004 // calculate the application's allocate rate. The only exception
1005 // to that is humongous objects that are allocated separately. But
1006 // given that humongous object allocations do not really affect
1007 // either the pause's duration nor when the next pause will take
1008 // place we can safely ignore them here.
1009 uint regions_allocated = eden_cset_region_length();
1010 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1011 _alloc_rate_ms_seq->add(alloc_rate_ms);
1393
1394 ergo_verbose5(ErgoHeapSizing,
1395 "attempt heap expansion",
1396 ergo_format_reason("recent GC overhead higher than "
1397 "threshold after GC")
1398 ergo_format_perc("recent GC overhead")
1399 ergo_format_perc("threshold")
1400 ergo_format_byte("uncommitted")
1401 ergo_format_byte_perc("calculated expansion amount"),
1402 recent_gc_overhead, threshold,
1403 uncommitted_bytes,
1404 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
1405
1406 return expand_bytes;
1407 } else {
1408 return 0;
1409 }
1410 }
1411
// Print the pause-time tracing summaries gathered during the run
// (each print() no-ops unless its tracing flag was enabled).
void G1CollectorPolicy::print_tracing_info() const {
  _trace_gen0_time_data.print();
  _trace_gen1_time_data.print();
}
1416
// Debug-only dump of young-generation survival-rate statistics.
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}
1423
1424 uint G1CollectorPolicy::max_regions(int purpose) {
1425 switch (purpose) {
1426 case GCAllocForSurvived:
1427 return _max_survivor_regions;
1428 case GCAllocForTenured:
1429 return REGIONS_UNLIMITED;
1430 default:
1431 ShouldNotReachHere();
1432 return REGIONS_UNLIMITED;
1433 };
1434 }
1951 guarantee(target_pause_time_ms > 0.0,
1952 err_msg("target_pause_time_ms = %1.6lf should be positive",
1953 target_pause_time_ms));
1954 guarantee(_collection_set == NULL, "Precondition");
1955
1956 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1957 double predicted_pause_time_ms = base_time_ms;
1958 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1959
1960 ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1961 "start choosing CSet",
1962 ergo_format_size("_pending_cards")
1963 ergo_format_ms("predicted base time")
1964 ergo_format_ms("remaining time")
1965 ergo_format_ms("target pause time"),
1966 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1967
1968 _last_gc_was_young = gcs_are_young() ? true : false;
1969
1970 if (_last_gc_was_young) {
1971 _trace_gen0_time_data.increment_young_collection_count();
1972 } else {
1973 _trace_gen0_time_data.increment_mixed_collection_count();
1974 }
1975
1976 // The young list is laid with the survivor regions from the previous
1977 // pause are appended to the RHS of the young list, i.e.
1978 // [Newly Young Regions ++ Survivors from last pause].
1979
1980 uint survivor_region_length = young_list->survivor_length();
1981 uint eden_region_length = young_list->length() - survivor_region_length;
1982 init_cset_region_lengths(eden_region_length, survivor_region_length);
1983
1984 HeapRegion* hr = young_list->first_survivor_region();
1985 while (hr != NULL) {
1986 assert(hr->is_survivor(), "badly formed young list");
1987 hr->set_young();
1988 hr = hr->get_next_young_region();
1989 }
1990
1991 // Clear the fields that point to the survivor list - they are all young now.
1992 young_list->clear_survivors();
1993
2134 }
2135
2136 stop_incremental_cset_building();
2137
2138 ergo_verbose5(ErgoCSetConstruction,
2139 "finish choosing CSet",
2140 ergo_format_region("eden")
2141 ergo_format_region("survivors")
2142 ergo_format_region("old")
2143 ergo_format_ms("predicted pause time")
2144 ergo_format_ms("target pause time"),
2145 eden_region_length, survivor_region_length,
2146 old_cset_region_length(),
2147 predicted_pause_time_ms, target_pause_time_ms);
2148
2149 double non_young_end_time_sec = os::elapsedTime();
2150 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2151 evacuation_info.set_collectionset_regions(cset_region_length());
2152 }
2153
2154 void TraceGen0TimeData::record_start_collection(double time_to_stop_the_world_ms) {
2155 if(TraceGen0Time) {
2156 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2157 }
2158 }
2159
2160 void TraceGen0TimeData::record_yield_time(double yield_time_ms) {
2161 if(TraceGen0Time) {
2162 _all_yield_times_ms.add(yield_time_ms);
2163 }
2164 }
2165
2166 void TraceGen0TimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2167 if(TraceGen0Time) {
2168 _total.add(pause_time_ms);
2169 _other.add(pause_time_ms - phase_times->accounted_time_ms());
2170 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2171 _parallel.add(phase_times->cur_collection_par_time_ms());
2172 _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
2173 _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
2174 _update_rs.add(phase_times->average_last_update_rs_time());
2175 _scan_rs.add(phase_times->average_last_scan_rs_time());
2176 _obj_copy.add(phase_times->average_last_obj_copy_time());
2177 _termination.add(phase_times->average_last_termination_time());
2178
2179 double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
2180 phase_times->average_last_satb_filtering_times_ms() +
2181 phase_times->average_last_update_rs_time() +
2182 phase_times->average_last_scan_rs_time() +
2183 phase_times->average_last_obj_copy_time() +
2184 + phase_times->average_last_termination_time();
2185
2186 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
2187 _parallel_other.add(parallel_other_time);
2188 _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2189 }
2190 }
2191
2192 void TraceGen0TimeData::increment_young_collection_count() {
2193 if(TraceGen0Time) {
2194 ++_young_pause_num;
2195 }
2196 }
2197
2198 void TraceGen0TimeData::increment_mixed_collection_count() {
2199 if(TraceGen0Time) {
2200 ++_mixed_pause_num;
2201 }
2202 }
2203
2204 void TraceGen0TimeData::print_summary(const char* str,
2205 const NumberSeq* seq) const {
2206 double sum = seq->sum();
2207 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2208 str, sum / 1000.0, seq->avg());
2209 }
2210
// Like print_summary, but follows it with the sample count, standard
// deviation and maximum of the sequence.
void TraceGen0TimeData::print_summary_sd(const char* str,
                                         const NumberSeq* seq) const {
  print_summary(str, seq);
  // NOTE(review): the '+' flag in "%+45s" has no defined effect for %s
  // conversions, and the opening parenthesis comes from the "(num"
  // argument -- presumably intentional alignment; verify before changing.
  gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                         "(num", seq->num(), seq->sd(), seq->maximum());
}
2217
// Print the summary of all gen0 pause data gathered while TraceGen0Time
// was enabled. No-op when the flag is off.
void TraceGen0TimeData::print() const {
  if (!TraceGen0Time) {
    return;
  }

  gclog_or_tty->print_cr("ALL PAUSES");
  print_summary_sd(" Total", &_total);
  gclog_or_tty->print_cr("");
  gclog_or_tty->print_cr("");
  gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
  gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
  gclog_or_tty->print_cr("");

  gclog_or_tty->print_cr("EVACUATION PAUSES");

  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
    gclog_or_tty->print_cr("none");
  } else {
    // Per-phase breakdown of the evacuation pauses.
    print_summary_sd(" Evacuation Pauses", &_total);
    print_summary(" Root Region Scan Wait", &_root_region_scan_wait);
    print_summary(" Parallel Time", &_parallel);
    print_summary(" Ext Root Scanning", &_ext_root_scan);
    print_summary(" SATB Filtering", &_satb_filtering);
    print_summary(" Update RS", &_update_rs);
    print_summary(" Scan RS", &_scan_rs);
    print_summary(" Object Copy", &_obj_copy);
    print_summary(" Termination", &_termination);
    print_summary(" Parallel Other", &_parallel_other);
    print_summary(" Clear CT", &_clear_ct);
    print_summary(" Other", &_other);
  }
  gclog_or_tty->print_cr("");

  gclog_or_tty->print_cr("MISC");
  print_summary_sd(" Stop World", &_all_stop_world_times_ms);
  print_summary_sd(" Yields", &_all_yield_times_ms);
}
2255
2256 void TraceGen1TimeData::record_full_collection(double full_gc_time_ms) {
2257 if (TraceGen1Time) {
2258 _all_full_gc_times.add(full_gc_time_ms);
2259 }
2260 }
2261
// Print the accumulated Full GC statistics. No-op when TraceGen1Time is
// off or no Full GC has been recorded.
void TraceGen1TimeData::print() const {
  if (!TraceGen1Time) {
    return;
  }

  if (_all_full_gc_times.num() > 0) {
    gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
                        _all_full_gc_times.num(),
                        _all_full_gc_times.sum() / 1000.0);
    gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
    gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
                           _all_full_gc_times.sd(),
                           _all_full_gc_times.maximum());
  }
}
|
787 }
788
789 return ret;
790 }
791 #endif // PRODUCT
792
// Note the start time of a Full GC and record heap-size information at
// the start of the collection.
void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start(true /* full */);
  // Release the future to-space so that it is available for compaction into.
  _g1->set_full_collection();
}
799
// Book-keeping done when a Full GC finishes: account its duration and
// reset the young/mixed heuristics so the policy restarts cleanly.
void G1CollectorPolicy::record_full_collection_end() {
  // Consider this like a collection pause for the purposes of allocation
  // since last pause.
  double end_sec = os::elapsedTime();
  double full_gc_time_sec = end_sec - _full_collection_start_sec;
  double full_gc_time_ms = full_gc_time_sec * 1000.0;

  // Full GC times are accounted to the old-gen tracing data.
  _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);

  update_recent_gc_times(end_sec, full_gc_time_ms);

  _g1->clear_full_collection();

  // "Nuke" the heuristics that control the young/mixed GC
  // transitions and make sure we start with young GCs after the Full GC.
  set_gcs_are_young(true);
  _last_young_gc = false;
  clear_initiate_conc_mark_if_possible();
  clear_during_initial_mark_pause();
  _in_marking_window = false;
  _in_marking_window_im = false;

  _short_lived_surv_rate_group->start_adding_regions();
  // also call this on any additional surv rate groups

  // No survivors remain after a Full GC.
  record_survivor_regions(0, NULL, NULL);

  _free_regions_at_end_of_collection = _g1->free_regions();
  _survivor_surv_rate_group->reset();
  // Recompute the target young-list length for the post-Full-GC state.
  update_young_list_target_length();
  _collectionSetChooser->clear();
}
833
// Remember when a stop-the-world request was made; read later (by
// record_collection_pause_start / record_concurrent_pause) to compute
// the time spent reaching the safepoint.
void G1CollectorPolicy::record_stop_world_start() {
  _stop_world_start = os::elapsedTime();
}
837
// Book-keeping done at the very start of an evacuation pause, before any
// of the pause's real work happens.
void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
  // We only need to do this here as the policy will only be applied
  // to the GC we're about to start. so, no point is calculating this
  // every time we calculate / recalculate the target young length.
  update_survivors_policy();

  // Sanity: the cached used-bytes value must match a full recalculation.
  assert(_g1->used() == _g1->recalculate_used(),
         err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
                 _g1->used(), _g1->recalculate_used()));

  // Time from the stop-the-world request to actually starting the pause.
  double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
  _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
  _stop_world_start = 0.0;

  record_heap_size_info_at_start(false /* full */);

  phase_times()->record_cur_collection_start_sec(start_time_sec);
  _pending_cards = _g1->pending_card_num();

  // Reset the per-pause counters that are accumulated during evacuation.
  _collection_set_bytes_used_before = 0;
  _bytes_copied_during_gc = 0;

  _last_gc_was_young = false;

  // do that for any other surv rate groups
  _short_lived_surv_rate_group->stop_adding_regions();
  _survivors_age_table.clear();

  assert( verify_young_ages(), "region age verification" );
}
868
869 void G1CollectorPolicy::record_concurrent_mark_init_end(double
884 double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
885 _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
886 _cur_mark_stop_world_time_ms += elapsed_time_ms;
887 _prev_collection_pause_end_ms += elapsed_time_ms;
888
889 _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
890 }
891
// Note the start time of the concurrent-mark cleanup phase.
void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
  _mark_cleanup_start_sec = os::elapsedTime();
}
895
// Concurrent-mark cleanup has finished: set _last_young_gc so the
// young/mixed transition heuristics see that marking completed, and note
// that we are no longer inside a marking window.
void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
  _last_young_gc = true;
  _in_marking_window = false;
}
900
901 void G1CollectorPolicy::record_concurrent_pause() {
902 if (_stop_world_start > 0.0) {
903 double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
904 _trace_young_gen_time_data.record_yield_time(yield_ms);
905 }
906 }
907
908 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
909 if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
910 return false;
911 }
912
913 size_t marking_initiating_used_threshold =
914 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
915 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
916 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
917
918 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
919 if (gcs_are_young() && !_last_young_gc) {
920 ergo_verbose5(ErgoConcCycles,
921 "request concurrent cycle initiation",
922 ergo_format_reason("occupancy higher than threshold")
923 ergo_format_byte("occupancy")
924 ergo_format_byte("allocation request")
971 }
972 #endif // PRODUCT
973
974 last_pause_included_initial_mark = during_initial_mark_pause();
975 if (last_pause_included_initial_mark) {
976 record_concurrent_mark_init_end(0.0);
977 } else if (need_to_start_conc_mark("end of GC")) {
978 // Note: this might have already been set, if during the last
979 // pause we decided to start a cycle but at the beginning of
980 // this pause we decided to postpone it. That's OK.
981 set_initiate_conc_mark_if_possible();
982 }
983
984 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0,
985 end_time_sec, false);
986
987 evacuation_info.set_collectionset_used_before(_collection_set_bytes_used_before);
988 evacuation_info.set_bytes_copied(_bytes_copied_during_gc);
989
990 if (update_stats) {
991 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
992 // this is where we update the allocation rate of the application
993 double app_time_ms =
994 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
995 if (app_time_ms < MIN_TIMER_GRANULARITY) {
996 // This usually happens due to the timer not having the required
997 // granularity. Some Linuxes are the usual culprits.
998 // We'll just set it to something (arbitrarily) small.
999 app_time_ms = 1.0;
1000 }
1001 // We maintain the invariant that all objects allocated by mutator
1002 // threads will be allocated out of eden regions. So, we can use
1003 // the eden region number allocated since the previous GC to
1004 // calculate the application's allocate rate. The only exception
1005 // to that is humongous objects that are allocated separately. But
1006 // given that humongous object allocations do not really affect
1007 // either the pause's duration nor when the next pause will take
1008 // place we can safely ignore them here.
1009 uint regions_allocated = eden_cset_region_length();
1010 double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1011 _alloc_rate_ms_seq->add(alloc_rate_ms);
1393
1394 ergo_verbose5(ErgoHeapSizing,
1395 "attempt heap expansion",
1396 ergo_format_reason("recent GC overhead higher than "
1397 "threshold after GC")
1398 ergo_format_perc("recent GC overhead")
1399 ergo_format_perc("threshold")
1400 ergo_format_byte("uncommitted")
1401 ergo_format_byte_perc("calculated expansion amount"),
1402 recent_gc_overhead, threshold,
1403 uncommitted_bytes,
1404 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
1405
1406 return expand_bytes;
1407 } else {
1408 return 0;
1409 }
1410 }
1411
// Print the pause-time tracing summaries gathered during the run
// (each print() no-ops unless its tracing flag was enabled).
void G1CollectorPolicy::print_tracing_info() const {
  _trace_young_gen_time_data.print();
  _trace_old_gen_time_data.print();
}
1416
// Debug-only dump of young-generation survival-rate statistics.
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}
1423
1424 uint G1CollectorPolicy::max_regions(int purpose) {
1425 switch (purpose) {
1426 case GCAllocForSurvived:
1427 return _max_survivor_regions;
1428 case GCAllocForTenured:
1429 return REGIONS_UNLIMITED;
1430 default:
1431 ShouldNotReachHere();
1432 return REGIONS_UNLIMITED;
1433 };
1434 }
1951 guarantee(target_pause_time_ms > 0.0,
1952 err_msg("target_pause_time_ms = %1.6lf should be positive",
1953 target_pause_time_ms));
1954 guarantee(_collection_set == NULL, "Precondition");
1955
1956 double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1957 double predicted_pause_time_ms = base_time_ms;
1958 double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1959
1960 ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
1961 "start choosing CSet",
1962 ergo_format_size("_pending_cards")
1963 ergo_format_ms("predicted base time")
1964 ergo_format_ms("remaining time")
1965 ergo_format_ms("target pause time"),
1966 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1967
1968 _last_gc_was_young = gcs_are_young() ? true : false;
1969
1970 if (_last_gc_was_young) {
1971 _trace_young_gen_time_data.increment_young_collection_count();
1972 } else {
1973 _trace_young_gen_time_data.increment_mixed_collection_count();
1974 }
1975
1976 // The young list is laid with the survivor regions from the previous
1977 // pause are appended to the RHS of the young list, i.e.
1978 // [Newly Young Regions ++ Survivors from last pause].
1979
1980 uint survivor_region_length = young_list->survivor_length();
1981 uint eden_region_length = young_list->length() - survivor_region_length;
1982 init_cset_region_lengths(eden_region_length, survivor_region_length);
1983
1984 HeapRegion* hr = young_list->first_survivor_region();
1985 while (hr != NULL) {
1986 assert(hr->is_survivor(), "badly formed young list");
1987 hr->set_young();
1988 hr = hr->get_next_young_region();
1989 }
1990
1991 // Clear the fields that point to the survivor list - they are all young now.
1992 young_list->clear_survivors();
1993
2134 }
2135
2136 stop_incremental_cset_building();
2137
2138 ergo_verbose5(ErgoCSetConstruction,
2139 "finish choosing CSet",
2140 ergo_format_region("eden")
2141 ergo_format_region("survivors")
2142 ergo_format_region("old")
2143 ergo_format_ms("predicted pause time")
2144 ergo_format_ms("target pause time"),
2145 eden_region_length, survivor_region_length,
2146 old_cset_region_length(),
2147 predicted_pause_time_ms, target_pause_time_ms);
2148
2149 double non_young_end_time_sec = os::elapsedTime();
2150 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2151 evacuation_info.set_collectionset_regions(cset_region_length());
2152 }
2153
2154 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
2155 if(TraceYoungGenTime) {
2156 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2157 }
2158 }
2159
2160 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
2161 if(TraceYoungGenTime) {
2162 _all_yield_times_ms.add(yield_time_ms);
2163 }
2164 }
2165
2166 void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2167 if(TraceYoungGenTime) {
2168 _total.add(pause_time_ms);
2169 _other.add(pause_time_ms - phase_times->accounted_time_ms());
2170 _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2171 _parallel.add(phase_times->cur_collection_par_time_ms());
2172 _ext_root_scan.add(phase_times->average_last_ext_root_scan_time());
2173 _satb_filtering.add(phase_times->average_last_satb_filtering_times_ms());
2174 _update_rs.add(phase_times->average_last_update_rs_time());
2175 _scan_rs.add(phase_times->average_last_scan_rs_time());
2176 _obj_copy.add(phase_times->average_last_obj_copy_time());
2177 _termination.add(phase_times->average_last_termination_time());
2178
2179 double parallel_known_time = phase_times->average_last_ext_root_scan_time() +
2180 phase_times->average_last_satb_filtering_times_ms() +
2181 phase_times->average_last_update_rs_time() +
2182 phase_times->average_last_scan_rs_time() +
2183 phase_times->average_last_obj_copy_time() +
2184 + phase_times->average_last_termination_time();
2185
2186 double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
2187 _parallel_other.add(parallel_other_time);
2188 _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2189 }
2190 }
2191
2192 void TraceYoungGenTimeData::increment_young_collection_count() {
2193 if(TraceYoungGenTime) {
2194 ++_young_pause_num;
2195 }
2196 }
2197
2198 void TraceYoungGenTimeData::increment_mixed_collection_count() {
2199 if(TraceYoungGenTime) {
2200 ++_mixed_pause_num;
2201 }
2202 }
2203
2204 void TraceYoungGenTimeData::print_summary(const char* str,
2205 const NumberSeq* seq) const {
2206 double sum = seq->sum();
2207 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2208 str, sum / 1000.0, seq->avg());
2209 }
2210
// Like print_summary, but follows it with the sample count, standard
// deviation and maximum of the sequence.
void TraceYoungGenTimeData::print_summary_sd(const char* str,
                                             const NumberSeq* seq) const {
  print_summary(str, seq);
  // NOTE(review): the '+' flag in "%+45s" has no defined effect for %s
  // conversions, and the opening parenthesis comes from the "(num"
  // argument -- presumably intentional alignment; verify before changing.
  gclog_or_tty->print_cr("%+45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                         "(num", seq->num(), seq->sd(), seq->maximum());
}
2217
// Print the summary of all young-gen pause data gathered while
// TraceYoungGenTime was enabled. No-op when the flag is off.
void TraceYoungGenTimeData::print() const {
  if (!TraceYoungGenTime) {
    return;
  }

  gclog_or_tty->print_cr("ALL PAUSES");
  print_summary_sd(" Total", &_total);
  gclog_or_tty->print_cr("");
  gclog_or_tty->print_cr("");
  gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
  gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
  gclog_or_tty->print_cr("");

  gclog_or_tty->print_cr("EVACUATION PAUSES");

  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
    gclog_or_tty->print_cr("none");
  } else {
    // Per-phase breakdown of the evacuation pauses.
    print_summary_sd(" Evacuation Pauses", &_total);
    print_summary(" Root Region Scan Wait", &_root_region_scan_wait);
    print_summary(" Parallel Time", &_parallel);
    print_summary(" Ext Root Scanning", &_ext_root_scan);
    print_summary(" SATB Filtering", &_satb_filtering);
    print_summary(" Update RS", &_update_rs);
    print_summary(" Scan RS", &_scan_rs);
    print_summary(" Object Copy", &_obj_copy);
    print_summary(" Termination", &_termination);
    print_summary(" Parallel Other", &_parallel_other);
    print_summary(" Clear CT", &_clear_ct);
    print_summary(" Other", &_other);
  }
  gclog_or_tty->print_cr("");

  gclog_or_tty->print_cr("MISC");
  print_summary_sd(" Stop World", &_all_stop_world_times_ms);
  print_summary_sd(" Yields", &_all_yield_times_ms);
}
2255
2256 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2257 if (TraceOldGenTime) {
2258 _all_full_gc_times.add(full_gc_time_ms);
2259 }
2260 }
2261
// Print the accumulated Full GC statistics. No-op when TraceOldGenTime
// is off or no Full GC has been recorded.
void TraceOldGenTimeData::print() const {
  if (!TraceOldGenTime) {
    return;
  }

  if (_all_full_gc_times.num() > 0) {
    gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
                        _all_full_gc_times.num(),
                        _all_full_gc_times.sum() / 1000.0);
    gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
    gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
                           _all_full_gc_times.sd(),
                           _all_full_gc_times.maximum());
  }
}
|