src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

rev 2591 : 6814390: G1: remove the concept of non-generational G1
Summary: Removed the possibility to turn off generational mode for G1.
Reviewed-by: johnc, ysr, tonyp


 153 #ifndef PRODUCT
 154   _cur_clear_ct_time_ms(0.0),
 155   _min_clear_cc_time_ms(-1.0),
 156   _max_clear_cc_time_ms(-1.0),
 157   _cur_clear_cc_time_ms(0.0),
 158   _cum_clear_cc_time_ms(0.0),
 159   _num_cc_clears(0L),
 160 #endif
 161 
 162   _region_num_young(0),
 163   _region_num_tenured(0),
 164   _prev_region_num_young(0),
 165   _prev_region_num_tenured(0),
 166 
 167   _aux_num(10),
 168   _all_aux_times_ms(new NumberSeq[_aux_num]),
 169   _cur_aux_start_times_ms(new double[_aux_num]),
 170   _cur_aux_times_ms(new double[_aux_num]),
 171   _cur_aux_times_set(new bool[_aux_num]),
 172 
 173   _concurrent_mark_init_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 174   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 175   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 176 
 177   // <NEW PREDICTION>
 178 
 179   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 180   _prev_collection_pause_end_ms(0.0),
 181   _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
 182   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
 183   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 184   _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 185   _partially_young_cards_per_entry_ratio_seq(
 186                                          new TruncatedSeq(TruncatedSeqLength)),
 187   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 188   _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 189   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 190   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 191   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 192   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 193   _non_young_other_cost_per_region_ms_seq(
 194                                          new TruncatedSeq(TruncatedSeqLength)),
 195 
 196   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 197   _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 198   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 199 
 200   _pause_time_target_ms((double) MaxGCPauseMillis),
 201 
 202   // </NEW PREDICTION>
 203 
 204   _in_young_gc_mode(false),
 205   _full_young_gcs(true),
 206   _full_young_pause_num(0),
 207   _partial_young_pause_num(0),
 208 
 209   _during_marking(false),
 210   _in_marking_window(false),
 211   _in_marking_window_im(false),
 212 
 213   _known_garbage_ratio(0.0),
 214   _known_garbage_bytes(0),
 215 
 216   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
 217 
 218   _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
 219 
 220   _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
 221   _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
 222 
 223   _recent_avg_pause_time_ratio(0.0),
 224   _num_markings(0),


 383   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 384     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
 385   }
 386 
 387   // Finally, make sure that the two parameters are consistent.
 388   if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
 389     char buffer[256];
 390     jio_snprintf(buffer, 256,
 391                  "MaxGCPauseMillis (%u) should be less than "
 392                  "GCPauseIntervalMillis (%u)",
 393                  MaxGCPauseMillis, GCPauseIntervalMillis);
 394     vm_exit_during_initialization(buffer);
 395   }
 396 
 397   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
 398   double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
 399   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
 400   _sigma = (double) G1ConfidencePercent / 100.0;
 401 
 402   // start conservatively (around 50ms is about right)
 403   _concurrent_mark_init_times_ms->add(0.05);
 404   _concurrent_mark_remark_times_ms->add(0.05);
 405   _concurrent_mark_cleanup_times_ms->add(0.20);
 406   _tenuring_threshold = MaxTenuringThreshold;
 407 
 408   // If G1FixedSurvivorSpaceSize is 0, which means the size is not
 409   // fixed, then _max_survivor_regions will be calculated by
 410   // calculate_young_list_target_length() during initialization.
 411   _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
 412 
 413   assert(GCTimeRatio > 0,
 414          "we should have set it to a default value in set_g1_gc_flags() "
 415          "if a user set it to 0");
 416   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 417 
 418   initialize_all();
 419 }
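
The _gc_overhead_perc assignment above (line 416) turns GCTimeRatio into a
target GC overhead percentage: a ratio of N means at most 1 part GC time per
N parts application time, i.e. 1/(1+N) of the total. A minimal standalone
sketch of the arithmetic, with a plain gc_time_ratio variable standing in
for the GCTimeRatio flag:

  #include <cstdio>

  // Sketch only: gc_time_ratio stands in for the GCTimeRatio flag.
  static double gc_overhead_perc(unsigned gc_time_ratio) {
    return 100.0 * (1.0 / (1.0 + (double) gc_time_ratio));
  }

  int main() {
    printf("%.2f%%\n", gc_overhead_perc(9));   // 10.00% overhead target
    printf("%.2f%%\n", gc_overhead_perc(19));  //  5.00%
    return 0;
  }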
 420 
 421 // Increment "i", mod "len"
 422 static void inc_mod(int& i, int len) {
 423   i++; if (i == len) i = 0;


 451 
 452   size_t min_young_region_num() {
 453     return size_to_region_num(_min_gen0_size);
 454   }
 455   size_t initial_young_region_num() {
 456     return size_to_region_num(_initial_gen0_size);
 457   }
 458   size_t max_young_region_num() {
 459     return size_to_region_num(_max_gen0_size);
 460   }
 461 };
 462 
 463 void G1CollectorPolicy::init() {
 464   // Set aside an initial future to_space.
 465   _g1 = G1CollectedHeap::heap();
 466 
 467   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 468 
 469   initialize_gc_policy_counters();
 470 
 471   if (G1Gen) {
 472     _in_young_gc_mode = true;
 473 
 474     G1YoungGenSizer sizer;
 475     size_t initial_region_num = sizer.initial_young_region_num();
 476 
 477     if (UseAdaptiveSizePolicy) {
 478       set_adaptive_young_list_length(true);
 479       _young_list_fixed_length = 0;
 480     } else {
 481       set_adaptive_young_list_length(false);
 482       _young_list_fixed_length = initial_region_num;
 483     }
 484     _free_regions_at_end_of_collection = _g1->free_regions();
 485     calculate_young_list_min_length();
 486     guarantee( _young_list_min_length == 0, "invariant, not enough info" );
 487     calculate_young_list_target_length();
 488   } else {
 489     _young_list_fixed_length = 0;
 490     _in_young_gc_mode = false;
 491   }
 492 
 493   // We may immediately start allocating regions and placing them on the
 494   // collection set list. Initialize the per-collection set info
 495   start_incremental_cset_building();
 496 }
 497 
 498 // Create the jstat counters for the policy.
 499 void G1CollectorPolicy::initialize_gc_policy_counters()
 500 {
 501   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
 502 }
 503 
 504 void G1CollectorPolicy::calculate_young_list_min_length() {
 505   _young_list_min_length = 0;
 506 
 507   if (!adaptive_young_list_length())
 508     return;
 509 
 510   if (_alloc_rate_ms_seq->num() > 3) {
 511     double now_sec = os::elapsedTime();
 512     double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 513     double alloc_rate_ms = predict_alloc_rate_ms();
 514     size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
 515     size_t current_region_num = _g1->young_list()->length();
 516     _young_list_min_length = min_regions + current_region_num;
 517   }
 518 }
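
calculate_young_list_min_length() above floors the young list at the number
of regions the mutator is predicted to allocate before the MMU tracker next
permits a maximum-length pause, plus the regions already allocated. A
standalone sketch of that calculation; the inputs are invented stand-ins for
predict_alloc_rate_ms() and _mmu_tracker->when_max_gc_sec():

  #include <cmath>
  #include <cstddef>
  #include <cstdio>

  int main() {
    double alloc_rate_regions_per_ms = 0.02; // hypothetical predicted rate
    double when_ms = 150.0;                  // ms until a max-length GC fits
    size_t current_young_regions = 10;       // already in the young list

    size_t min_regions = (size_t) ceil(alloc_rate_regions_per_ms * when_ms);
    size_t min_length = min_regions + current_young_regions;
    printf("min young length: %zu regions\n", min_length); // prints 13
    return 0;
  }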
 519 
 520 void G1CollectorPolicy::calculate_young_list_target_length() {
 521   if (adaptive_young_list_length()) {


 851 
 852   _prev_region_num_young   = _region_num_young;
 853   _prev_region_num_tenured = _region_num_tenured;
 854 
 855   _free_regions_at_end_of_collection = _g1->free_regions();
 856   // Reset the survivors' SurvRateGroup.
 857   _survivor_surv_rate_group->reset();
 858   calculate_young_list_min_length();
 859   calculate_young_list_target_length();
 860 }
 861 
 862 void G1CollectorPolicy::record_stop_world_start() {
 863   _stop_world_start = os::elapsedTime();
 864 }
 865 
 866 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
 867                                                       size_t start_used) {
 868   if (PrintGCDetails) {
 869     gclog_or_tty->stamp(PrintGCTimeStamps);
 870     gclog_or_tty->print("[GC pause");
 871     if (in_young_gc_mode())
 872       gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
 873   }
 874 
 875   assert(_g1->used() == _g1->recalculate_used(),
 876          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
 877                  _g1->used(), _g1->recalculate_used()));
 878 
 879   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 880   _all_stop_world_times_ms->add(s_w_t_ms);
 881   _stop_world_start = 0.0;
 882 
 883   _cur_collection_start_sec = start_time_sec;
 884   _cur_collection_pause_used_at_start_bytes = start_used;
 885   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
 886   _pending_cards = _g1->pending_card_num();
 887   _max_pending_cards = _g1->max_pending_card_num();
 888 
 889   _bytes_in_collection_set_before_gc = 0;
 890   _bytes_copied_during_gc = 0;
 891 


 904     _par_last_mark_stack_scan_times_ms[i] = -1234.0;
 905     _par_last_update_rs_times_ms[i] = -1234.0;
 906     _par_last_update_rs_processed_buffers[i] = -1234.0;
 907     _par_last_scan_rs_times_ms[i] = -1234.0;
 908     _par_last_obj_copy_times_ms[i] = -1234.0;
 909     _par_last_termination_times_ms[i] = -1234.0;
 910     _par_last_termination_attempts[i] = -1234.0;
 911     _par_last_gc_worker_end_times_ms[i] = -1234.0;
 912     _par_last_gc_worker_times_ms[i] = -1234.0;
 913   }
 914 #endif
 915 
 916   for (int i = 0; i < _aux_num; ++i) {
 917     _cur_aux_times_ms[i] = 0.0;
 918     _cur_aux_times_set[i] = false;
 919   }
 920 
 921   _satb_drain_time_set = false;
 922   _last_satb_drain_processed_buffers = -1;
 923 
 924   if (in_young_gc_mode())
 925     _last_young_gc_full = false;
 926 
 927   // do that for any other surv rate groups
 928   _short_lived_surv_rate_group->stop_adding_regions();
 929   _survivors_age_table.clear();
 930 
 931   assert( verify_young_ages(), "region age verification" );
 932 }
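
The -1234.0 stores in the loop above (lines 904-912) seed the per-worker
phase times with a deliberately invalid marker, so verification code can
tell "this worker never reported" apart from a genuine 0.0 ms entry. A small
sketch of the pattern; the array size and constant name are illustrative:

  #include <cstdio>

  static const double INVALID_TIME_MS = -1234.0; // hypothetical name

  int main() {
    double par_last_times_ms[4];
    for (int i = 0; i < 4; i++) par_last_times_ms[i] = INVALID_TIME_MS;
    par_last_times_ms[0] = 3.5;  // only worker 0 reports a time
    for (int i = 0; i < 4; i++) {
      if (par_last_times_ms[i] == INVALID_TIME_MS)
        printf("worker %d: never reported\n", i);
      else
        printf("worker %d: %.1f ms\n", i, par_last_times_ms[i]);
    }
    return 0;
  }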
 933 
 934 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
 935   _mark_closure_time_ms = mark_closure_time_ms;
 936 }
 937 
 938 void G1CollectorPolicy::record_concurrent_mark_init_start() {
 939   _mark_init_start_sec = os::elapsedTime();
 940   guarantee(!in_young_gc_mode(), "should not be here in young GC mode");
 941 }
 942 
 943 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
 944                                                    mark_init_elapsed_time_ms) {
 945   _during_marking = true;
 946   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
 947   clear_during_initial_mark_pause();
 948   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 949 }
 950 
 951 void G1CollectorPolicy::record_concurrent_mark_init_end() {
 952   double end_time_sec = os::elapsedTime();
 953   double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
 954   _concurrent_mark_init_times_ms->add(elapsed_time_ms);
 955   record_concurrent_mark_init_end_pre(elapsed_time_ms);
 956 
 957   _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
 958 }
 959 
 960 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 961   _mark_remark_start_sec = os::elapsedTime();
 962   _during_marking = false;
 963 }
 964 
 965 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 966   double end_time_sec = os::elapsedTime();
 967   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 968   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 969   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 970   _prev_collection_pause_end_ms += elapsed_time_ms;
 971 
 972   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
 973 }
 974 
 975 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 976   _mark_cleanup_start_sec = os::elapsedTime();
 977 }
 978 
 979 void


1002   _cur_mark_stop_world_time_ms += elapsed_time_ms;
1003   _prev_collection_pause_end_ms += elapsed_time_ms;
1004 
1005   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
1006 
1007   _num_markings++;
1008 
1009   // We did a marking, so reset the "since_last_mark" variables.
1010   double considerConcMarkCost = 1.0;
1011   // If there are available processors, concurrent activity is free...
1012   if (Threads::number_of_non_daemon_threads() * 2 <
1013       os::active_processor_count()) {
1014     considerConcMarkCost = 0.0;
1015   }
1016   _n_pauses_at_mark_end = _n_pauses;
1017   _n_marks_since_last_pause++;
1018 }
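
The considerConcMarkCost heuristic above treats concurrent marking as free
when there is CPU headroom: twice the non-daemon thread count still fits
under the active processor count. A standalone sketch with invented thread
and processor counts:

  #include <cstdio>

  int main() {
    int non_daemon_threads = 4;   // stand-in for the runtime query
    int active_processors  = 16;  // stand-in for os::active_processor_count()
    double consider_conc_mark_cost =
        (non_daemon_threads * 2 < active_processors) ? 0.0  // spare CPU: free
                                                     : 1.0; // contended
    printf("conc mark cost factor: %.1f\n", consider_conc_mark_cost); // 0.0
    return 0;
  }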
1019 
1020 void
1021 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
1022   if (in_young_gc_mode()) {
1023     _should_revert_to_full_young_gcs = false;
1024     _last_full_young_gc = true;
1025     _in_marking_window = false;
1026     if (adaptive_young_list_length())
1027       calculate_young_list_target_length();
1028   }
1029 }
1030 
1031 void G1CollectorPolicy::record_concurrent_pause() {
1032   if (_stop_world_start > 0.0) {
1033     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
1034     _all_yield_times_ms->add(yield_ms);
1035   }
1036 }
1037 
1038 void G1CollectorPolicy::record_concurrent_pause_end() {
1039 }
1040 
1041 template<class T>
1042 T sum_of(T* sum_arr, int start, int n, int N) {
1043   T sum = (T)0;
1044   for (int i = 0; i < n; i++) {
1045     int j = (start + i) % N;
1046     sum += sum_arr[j];
1047   }
1048   return sum;


1157 
1158 void G1CollectorPolicy::record_collection_pause_end() {
1159   double end_time_sec = os::elapsedTime();
1160   double elapsed_ms = _last_pause_time_ms;
1161   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
1162   size_t rs_size =
1163     _cur_collection_pause_used_regions_at_start - collection_set_size();
1164   size_t cur_used_bytes = _g1->used();
1165   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1166   bool last_pause_included_initial_mark = false;
1167   bool update_stats = !_g1->evacuation_failed();
1168 
1169 #ifndef PRODUCT
1170   if (G1YoungSurvRateVerbose) {
1171     gclog_or_tty->print_cr("");
1172     _short_lived_surv_rate_group->print();
1173     // do that for any other surv rate groups too
1174   }
1175 #endif // PRODUCT
1176 
1177   if (in_young_gc_mode()) {
1178     last_pause_included_initial_mark = during_initial_mark_pause();
1179     if (last_pause_included_initial_mark)
1180       record_concurrent_mark_init_end_pre(0.0);
1181 
1182     size_t min_used_targ =
1183       (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
1184 
1185 
1186     if (!_g1->mark_in_progress() && !_last_full_young_gc) {
1187       assert(!last_pause_included_initial_mark, "invariant");
1188       if (cur_used_bytes > min_used_targ &&
1189           cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
1190         assert(!during_initial_mark_pause(), "we should not see this here");
1191 
1192         // Note: this might have already been set, if during the last
1193         // pause we decided to start a cycle but at the beginning of
1194         // this pause we decided to postpone it. That's OK.
1195         set_initiate_conc_mark_if_possible();
1196       }
1197     }
1198 
1199     _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
1200   }
1201 
1202   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
1203                           end_time_sec, false);
1204 
1205   guarantee(_cur_collection_pause_used_regions_at_start >=
1206             collection_set_size(),
1207             "Negative RS size?");
1208 
1209   // This assert is exempted when we're doing parallel collection pauses,
1210   // because the fragmentation caused by the parallel GC allocation buffers
1211   // can lead to more memory being used during collection than was used
1212   // before. Best leave this out until the fragmentation problem is fixed.
1213   // Pauses in which evacuation failed can also lead to negative
1214   // collections, since no space is reclaimed from a region containing an
1215   // object whose evacuation failed.
1216   // Further, we're now always doing parallel collection.  But I'm still
1217   // leaving this here as a placeholder for a more precise assertion later.
1218   // (DLD, 10/05.)
1219   assert((true || parallel) // Always using GC LABs now.
1220          || _g1->evacuation_failed()


1451   // Reset marks-between-pauses counter.
1452   _n_marks_since_last_pause = 0;
1453 
1454   // Update the efficiency-since-mark vars.
1455   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
1456   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
1457     // This usually happens due to the timer not having the required
1458     // granularity. Some Linuxes are the usual culprits.
1459     // We'll just set it to something (arbitrarily) small.
1460     proc_ms = 1.0;
1461   }
1462   double cur_efficiency = (double) freed_bytes / proc_ms;
1463 
1464   bool new_in_marking_window = _in_marking_window;
1465   bool new_in_marking_window_im = false;
1466   if (during_initial_mark_pause()) {
1467     new_in_marking_window = true;
1468     new_in_marking_window_im = true;
1469   }
1470 
1471   if (in_young_gc_mode()) {
1472     if (_last_full_young_gc) {
1473       set_full_young_gcs(false);
1474       _last_full_young_gc = false;
1475     }
1476 
1477     if ( !_last_young_gc_full ) {
1478       if ( _should_revert_to_full_young_gcs ||
1479            _known_garbage_ratio < 0.05 ||
1480            (adaptive_young_list_length() &&
1481            (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
1482         set_full_young_gcs(true);
1483       }
1484     }
1485     _should_revert_to_full_young_gcs = false;
1486 
1487     if (_last_young_gc_full && !_during_marking)
1488       _young_gc_eff_seq->add(cur_efficiency);
1489   }
1490 
1491   _short_lived_surv_rate_group->start_adding_regions();
 1492   // do that for any other surv rate groups
1493 
1494   // <NEW PREDICTION>
1495 
1496   if (update_stats) {
1497     double pause_time_ms = elapsed_ms;
1498 
1499     size_t diff = 0;
1500     if (_max_pending_cards >= _pending_cards)
1501       diff = _max_pending_cards - _pending_cards;
1502     _pending_card_diff_seq->add((double) diff);
1503 
1504     double cost_per_card_ms = 0.0;
1505     if (_pending_cards > 0) {
1506       cost_per_card_ms = update_rs_time / (double) _pending_cards;
1507       _cost_per_card_ms_seq->add(cost_per_card_ms);


1893   _predicted_young_other_time_ms =
1894     predict_young_other_time_ms(_recorded_young_regions);
1895   _predicted_non_young_other_time_ms =
1896     predict_non_young_other_time_ms(_recorded_non_young_regions);
1897 
1898   _predicted_pause_time_ms =
1899     _predicted_rs_update_time_ms +
1900     _predicted_rs_scan_time_ms +
1901     _predicted_object_copy_time_ms +
1902     _predicted_constant_other_time_ms +
1903     _predicted_young_other_time_ms +
1904     _predicted_non_young_other_time_ms;
1905 #endif // PREDICTIONS_VERBOSE
1906 }
1907 
1908 void G1CollectorPolicy::check_if_region_is_too_expensive(double
1909                                                            predicted_time_ms) {
1910   // I don't think we need to do this when in young GC mode since
1911   // marking will be initiated next time we hit the soft limit anyway...
1912   if (predicted_time_ms > _expensive_region_limit_ms) {
1913     if (!in_young_gc_mode()) {
1914         set_full_young_gcs(true);
1915         // We might want to do something different here. However,
1916         // right now we don't support the non-generational G1 mode
1917         // (and in fact we are planning to remove the associated code,
1918         // see CR 6814390). So, let's leave it as is and this will be
1919         // removed some time in the future
1920         ShouldNotReachHere();
1921         set_during_initial_mark_pause();
1922     } else
1923       // no point in doing another partial one
1924       _should_revert_to_full_young_gcs = true;
1925   }
1926 }
1927 
1928 // </NEW PREDICTION>
1929 
1930 
1931 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1932                                                double elapsed_ms) {
1933   _recent_gc_times_ms->add(elapsed_ms);
1934   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1935   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1936 }
1937 
1938 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
1939   if (_recent_pause_times_ms->num() == 0) {
1940     return (double) MaxGCPauseMillis;
1941   }
1942   return _recent_pause_times_ms->avg();


2600     _g1->concurrent_mark()->registerCSetRegion(hr);
2601 
2602   assert(!hr->in_collection_set(), "should not already be in the CSet");
2603   hr->set_in_collection_set(true);
2604   hr->set_next_in_collection_set(_collection_set);
2605   _collection_set = hr;
2606   _collection_set_size++;
2607   _collection_set_bytes_used_before += hr->used();
2608   _g1->register_region_with_in_cset_fast_test(hr);
2609 }
2610 
2611 // Initialize the per-collection-set information
2612 void G1CollectorPolicy::start_incremental_cset_building() {
2613   assert(_inc_cset_build_state == Inactive, "Precondition");
2614 
2615   _inc_cset_head = NULL;
2616   _inc_cset_tail = NULL;
2617   _inc_cset_size = 0;
2618   _inc_cset_bytes_used_before = 0;
2619 
2620   if (in_young_gc_mode()) {
2621     _inc_cset_young_index = 0;
2622   }
2623 
2624   _inc_cset_max_finger = 0;
2625   _inc_cset_recorded_young_bytes = 0;
2626   _inc_cset_recorded_rs_lengths = 0;
2627   _inc_cset_predicted_elapsed_time_ms = 0;
2628   _inc_cset_predicted_bytes_to_copy = 0;
2629   _inc_cset_build_state = Active;
2630 }
2631 
2632 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
2633   // This routine is used when:
2634   // * adding survivor regions to the incremental cset at the end of an
2635   //   evacuation pause,
2636   // * adding the current allocation region to the incremental cset
2637   //   when it is retired, and
2638   // * updating existing policy information for a region in the
2639   //   incremental cset via young list RSet sampling.
2640   // Therefore this routine may be called at a safepoint by the
2641   // VM thread, or in-between safepoints by mutator threads (when
2642   // retiring the current allocation region) or a concurrent


2831   }
2832 
2833   // We figure out the number of bytes available for future to-space.
2834   // For new regions without marking information, we must assume the
2835   // worst-case of complete survival.  If we have marking information for a
2836   // region, we can bound the amount of live data.  We can add a number of
2837   // such regions, as long as the sum of the live data bounds does not
2838   // exceed the available evacuation space.
2839   size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
2840 
2841   size_t expansion_bytes =
2842     _g1->expansion_regions() * HeapRegion::GrainBytes;
2843 
2844   _collection_set_bytes_used_before = 0;
2845   _collection_set_size = 0;
2846 
2847   // Adjust for expansion and slop.
2848   max_live_bytes = max_live_bytes + expansion_bytes;
2849 
2850   HeapRegion* hr;
2851   if (in_young_gc_mode()) {
2852     double young_start_time_sec = os::elapsedTime();
2853 
2854     if (G1PolicyVerbose > 0) {
2855       gclog_or_tty->print_cr("Adding %d young regions to the CSet",
2856                     _g1->young_list()->length());
2857     }
2858 
2859     _young_cset_length  = 0;
 2860     _last_young_gc_full = full_young_gcs();
2861 
2862     if (_last_young_gc_full)
2863       ++_full_young_pause_num;
2864     else
2865       ++_partial_young_pause_num;
2866 
 2867     // The young list is laid out with the survivor regions from the
 2868     // previous pause appended to the RHS of the young list, i.e.
 2869     //   [Newly Young Regions ++ Survivors from last pause].
2870 
2871     hr = _g1->young_list()->first_survivor_region();


2902 #if PREDICTIONS_VERBOSE
2903     set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
2904 #endif // PREDICTIONS_VERBOSE
2905 
2906     if (G1PolicyVerbose > 0) {
2907       gclog_or_tty->print_cr("  Added " PTR_FORMAT " Young Regions to CS.",
2908                              _inc_cset_size);
2909       gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
2910                             max_live_bytes/K);
2911     }
2912 
2913     assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
2914 
2915     double young_end_time_sec = os::elapsedTime();
2916     _recorded_young_cset_choice_time_ms =
2917       (young_end_time_sec - young_start_time_sec) * 1000.0;
2918 
2919     // We are doing young collections so reset this.
2920     non_young_start_time_sec = young_end_time_sec;
2921 
2922     // Note we can use either _collection_set_size or
2923     // _young_cset_length here
2924     if (_collection_set_size > 0 && _last_young_gc_full) {
2925       // don't bother adding more regions...
2926       goto choose_collection_set_end;
2927     }
2928   }
2929 
2930   if (!in_young_gc_mode() || !full_young_gcs()) {
2931     bool should_continue = true;
2932     NumberSeq seq;
2933     double avg_prediction = 100000000000000000.0; // something very large
2934 
2935     do {
2936       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
2937                                                       avg_prediction);
2938       if (hr != NULL) {
2939         double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
2940         time_remaining_ms -= predicted_time_ms;
2941         predicted_pause_time_ms += predicted_time_ms;
2942         add_to_collection_set(hr);
2943         record_non_young_cset_region(hr);
2944         max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
2945         if (G1PolicyVerbose > 0) {
2946           gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
2947                         max_live_bytes/K);
2948         }
2949         seq.add(predicted_time_ms);
2950         avg_prediction = seq.avg() + seq.sd();
2951       }
2952       should_continue =
2953         ( hr != NULL) &&
2954         ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
2955           : _collection_set_size < _young_list_fixed_length );
2956     } while (should_continue);
2957 
2958     if (!adaptive_young_list_length() &&
2959         _collection_set_size < _young_list_fixed_length)
2960       _should_revert_to_full_young_gcs  = true;
2961   }
2962 
2963 choose_collection_set_end:
2964   stop_incremental_cset_building();
2965 
2966   count_CS_bytes_used();
2967 
2968   end_recording_regions();
2969 
2970   double non_young_end_time_sec = os::elapsedTime();
2971   _recorded_non_young_cset_choice_time_ms =
2972     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
2973 }
2974 
2975 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
2976   G1CollectorPolicy::record_full_collection_end();
2977   _collectionSetChooser->updateAfterFullCollection();
2978 }
2979 
2980 void G1CollectorPolicy_BestRegionsFirst::
2981 expand_if_possible(size_t numRegions) {
2982   size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
2983   _g1->expand(expansion_bytes);


 153 #ifndef PRODUCT
 154   _cur_clear_ct_time_ms(0.0),
 155   _min_clear_cc_time_ms(-1.0),
 156   _max_clear_cc_time_ms(-1.0),
 157   _cur_clear_cc_time_ms(0.0),
 158   _cum_clear_cc_time_ms(0.0),
 159   _num_cc_clears(0L),
 160 #endif
 161 
 162   _region_num_young(0),
 163   _region_num_tenured(0),
 164   _prev_region_num_young(0),
 165   _prev_region_num_tenured(0),
 166 
 167   _aux_num(10),
 168   _all_aux_times_ms(new NumberSeq[_aux_num]),
 169   _cur_aux_start_times_ms(new double[_aux_num]),
 170   _cur_aux_times_ms(new double[_aux_num]),
 171   _cur_aux_times_set(new bool[_aux_num]),
 172 

 173   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 174   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
 175 
 176   // <NEW PREDICTION>
 177 
 178   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 179   _prev_collection_pause_end_ms(0.0),
 180   _pending_card_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
 181   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
 182   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 183   _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 184   _partially_young_cards_per_entry_ratio_seq(
 185                                          new TruncatedSeq(TruncatedSeqLength)),
 186   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 187   _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 188   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 189   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 190   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 191   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 192   _non_young_other_cost_per_region_ms_seq(
 193                                          new TruncatedSeq(TruncatedSeqLength)),
 194 
 195   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 196   _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 197   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 198 
 199   _pause_time_target_ms((double) MaxGCPauseMillis),
 200 
 201   // </NEW PREDICTION>
 202 

 203   _full_young_gcs(true),
 204   _full_young_pause_num(0),
 205   _partial_young_pause_num(0),
 206 
 207   _during_marking(false),
 208   _in_marking_window(false),
 209   _in_marking_window_im(false),
 210 
 211   _known_garbage_ratio(0.0),
 212   _known_garbage_bytes(0),
 213 
 214   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
 215 
 216   _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
 217 
 218   _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
 219   _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
 220 
 221   _recent_avg_pause_time_ratio(0.0),
 222   _num_markings(0),


 381   if (FLAG_IS_DEFAULT(GCPauseIntervalMillis)) {
 382     FLAG_SET_DEFAULT(GCPauseIntervalMillis, MaxGCPauseMillis + 1);
 383   }
 384 
 385   // Finally, make sure that the two parameters are consistent.
 386   if (MaxGCPauseMillis >= GCPauseIntervalMillis) {
 387     char buffer[256];
 388     jio_snprintf(buffer, 256,
 389                  "MaxGCPauseMillis (%u) should be less than "
 390                  "GCPauseIntervalMillis (%u)",
 391                  MaxGCPauseMillis, GCPauseIntervalMillis);
 392     vm_exit_during_initialization(buffer);
 393   }
 394 
 395   double max_gc_time = (double) MaxGCPauseMillis / 1000.0;
 396   double time_slice  = (double) GCPauseIntervalMillis / 1000.0;
 397   _mmu_tracker = new G1MMUTrackerQueue(time_slice, max_gc_time);
 398   _sigma = (double) G1ConfidencePercent / 100.0;
 399 
 400   // start conservatively (around 50ms is about right)

 401   _concurrent_mark_remark_times_ms->add(0.05);
 402   _concurrent_mark_cleanup_times_ms->add(0.20);
 403   _tenuring_threshold = MaxTenuringThreshold;
 404 
 405   // If G1FixedSurvivorSpaceSize is 0, which means the size is not
 406   // fixed, then _max_survivor_regions will be calculated by
 407   // calculate_young_list_target_length() during initialization.
 408   _max_survivor_regions = G1FixedSurvivorSpaceSize / HeapRegion::GrainBytes;
 409 
 410   assert(GCTimeRatio > 0,
 411          "we should have set it to a default value in set_g1_gc_flags() "
 412          "if a user set it to 0");
 413   _gc_overhead_perc = 100.0 * (1.0 / (1.0 + GCTimeRatio));
 414 
 415   initialize_all();
 416 }
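
The two flags validated above define the MMU goal handed to
G1MMUTrackerQueue: at most max_gc_time seconds of pause inside any sliding
window of time_slice seconds. A standalone sketch of the consistency check
and conversion, with local variables standing in for the flags:

  #include <cstdio>

  int main() {
    unsigned max_gc_pause_millis = 200;                          // stand-in
    unsigned gc_pause_interval_millis = max_gc_pause_millis + 1; // default

    if (max_gc_pause_millis >= gc_pause_interval_millis) {
      printf("error: pause target must be below the interval\n");
      return 1;
    }
    double max_gc_time = (double) max_gc_pause_millis / 1000.0;      // 0.200 s
    double time_slice  = (double) gc_pause_interval_millis / 1000.0; // 0.201 s
    printf("MMU goal: <= %.3f s paused per %.3f s window\n",
           max_gc_time, time_slice);
    return 0;
  }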
 417 
 418 // Increment "i", mod "len"
 419 static void inc_mod(int& i, int len) {
 420   i++; if (i == len) i = 0;


 448 
 449   size_t min_young_region_num() {
 450     return size_to_region_num(_min_gen0_size);
 451   }
 452   size_t initial_young_region_num() {
 453     return size_to_region_num(_initial_gen0_size);
 454   }
 455   size_t max_young_region_num() {
 456     return size_to_region_num(_max_gen0_size);
 457   }
 458 };
 459 
 460 void G1CollectorPolicy::init() {
 461   // Set aside an initial future to_space.
 462   _g1 = G1CollectedHeap::heap();
 463 
 464   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 465 
 466   initialize_gc_policy_counters();
 467 



 468   G1YoungGenSizer sizer;
 469   size_t initial_region_num = sizer.initial_young_region_num();
 470 
 471   if (UseAdaptiveSizePolicy) {
 472     set_adaptive_young_list_length(true);
 473     _young_list_fixed_length = 0;
 474   } else {
 475     set_adaptive_young_list_length(false);
 476     _young_list_fixed_length = initial_region_num;
 477   }
 478   _free_regions_at_end_of_collection = _g1->free_regions();
 479   calculate_young_list_min_length();
 480   guarantee( _young_list_min_length == 0, "invariant, not enough info" );
 481   calculate_young_list_target_length();




 482 
 483   // We may immediately start allocating regions and placing them on the
 484   // collection set list. Initialize the per-collection set info
 485   start_incremental_cset_building();
 486 }
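
With G1Gen gone, init() always performs the young-gen sizing;
UseAdaptiveSizePolicy now only decides whether the target length is
recomputed every pause or pinned at the initial region count. A condensed
sketch of that decision, with the sizer result and flag replaced by invented
values:

  #include <cstddef>
  #include <cstdio>

  int main() {
    bool use_adaptive_size_policy = true; // stand-in for the flag
    size_t initial_region_num = 25;       // pretend initial_young_region_num()

    bool adaptive_length;
    size_t fixed_length;
    if (use_adaptive_size_policy) {
      adaptive_length = true;   // target recomputed from pause-time goals
      fixed_length = 0;
    } else {
      adaptive_length = false;  // pinned to the initial sizing
      fixed_length = initial_region_num;
    }
    printf("adaptive=%d fixed_length=%zu\n", (int) adaptive_length,
           fixed_length);
    return 0;
  }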
 487 
 488 // Create the jstat counters for the policy.
 489 void G1CollectorPolicy::initialize_gc_policy_counters()
 490 {
 491   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 492 }
 493 
 494 void G1CollectorPolicy::calculate_young_list_min_length() {
 495   _young_list_min_length = 0;
 496 
 497   if (!adaptive_young_list_length())
 498     return;
 499 
 500   if (_alloc_rate_ms_seq->num() > 3) {
 501     double now_sec = os::elapsedTime();
 502     double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 503     double alloc_rate_ms = predict_alloc_rate_ms();
 504     size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
 505     size_t current_region_num = _g1->young_list()->length();
 506     _young_list_min_length = min_regions + current_region_num;
 507   }
 508 }
 509 
 510 void G1CollectorPolicy::calculate_young_list_target_length() {
 511   if (adaptive_young_list_length()) {


 841 
 842   _prev_region_num_young   = _region_num_young;
 843   _prev_region_num_tenured = _region_num_tenured;
 844 
 845   _free_regions_at_end_of_collection = _g1->free_regions();
 846   // Reset the survivors' SurvRateGroup.
 847   _survivor_surv_rate_group->reset();
 848   calculate_young_list_min_length();
 849   calculate_young_list_target_length();
 850 }
 851 
 852 void G1CollectorPolicy::record_stop_world_start() {
 853   _stop_world_start = os::elapsedTime();
 854 }
 855 
 856 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
 857                                                       size_t start_used) {
 858   if (PrintGCDetails) {
 859     gclog_or_tty->stamp(PrintGCTimeStamps);
 860     gclog_or_tty->print("[GC pause");

 861     gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
 862   }
 863 
 864   assert(_g1->used() == _g1->recalculate_used(),
 865          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
 866                  _g1->used(), _g1->recalculate_used()));
 867 
 868   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 869   _all_stop_world_times_ms->add(s_w_t_ms);
 870   _stop_world_start = 0.0;
 871 
 872   _cur_collection_start_sec = start_time_sec;
 873   _cur_collection_pause_used_at_start_bytes = start_used;
 874   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
 875   _pending_cards = _g1->pending_card_num();
 876   _max_pending_cards = _g1->max_pending_card_num();
 877 
 878   _bytes_in_collection_set_before_gc = 0;
 879   _bytes_copied_during_gc = 0;
 880 


 893     _par_last_mark_stack_scan_times_ms[i] = -1234.0;
 894     _par_last_update_rs_times_ms[i] = -1234.0;
 895     _par_last_update_rs_processed_buffers[i] = -1234.0;
 896     _par_last_scan_rs_times_ms[i] = -1234.0;
 897     _par_last_obj_copy_times_ms[i] = -1234.0;
 898     _par_last_termination_times_ms[i] = -1234.0;
 899     _par_last_termination_attempts[i] = -1234.0;
 900     _par_last_gc_worker_end_times_ms[i] = -1234.0;
 901     _par_last_gc_worker_times_ms[i] = -1234.0;
 902   }
 903 #endif
 904 
 905   for (int i = 0; i < _aux_num; ++i) {
 906     _cur_aux_times_ms[i] = 0.0;
 907     _cur_aux_times_set[i] = false;
 908   }
 909 
 910   _satb_drain_time_set = false;
 911   _last_satb_drain_processed_buffers = -1;
 912 

 913   _last_young_gc_full = false;
 914 
 915   // do that for any other surv rate groups
 916   _short_lived_surv_rate_group->stop_adding_regions();
 917   _survivors_age_table.clear();
 918 
 919   assert( verify_young_ages(), "region age verification" );
 920 }
 921 
 922 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
 923   _mark_closure_time_ms = mark_closure_time_ms;
 924 }
 925 
 926 void G1CollectorPolicy::record_concurrent_mark_init_end(double





 927                                                    mark_init_elapsed_time_ms) {
 928   _during_marking = true;
 929   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
 930   clear_during_initial_mark_pause();
 931   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 932 }
 933 









 934 void G1CollectorPolicy::record_concurrent_mark_remark_start() {
 935   _mark_remark_start_sec = os::elapsedTime();
 936   _during_marking = false;
 937 }
 938 
 939 void G1CollectorPolicy::record_concurrent_mark_remark_end() {
 940   double end_time_sec = os::elapsedTime();
 941   double elapsed_time_ms = (end_time_sec - _mark_remark_start_sec)*1000.0;
 942   _concurrent_mark_remark_times_ms->add(elapsed_time_ms);
 943   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 944   _prev_collection_pause_end_ms += elapsed_time_ms;
 945 
 946   _mmu_tracker->add_pause(_mark_remark_start_sec, end_time_sec, true);
 947 }
 948 
 949 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 950   _mark_cleanup_start_sec = os::elapsedTime();
 951 }
 952 
 953 void


 976   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 977   _prev_collection_pause_end_ms += elapsed_time_ms;
 978 
 979   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
 980 
 981   _num_markings++;
 982 
 983   // We did a marking, so reset the "since_last_mark" variables.
 984   double considerConcMarkCost = 1.0;
 985   // If there are available processors, concurrent activity is free...
 986   if (Threads::number_of_non_daemon_threads() * 2 <
 987       os::active_processor_count()) {
 988     considerConcMarkCost = 0.0;
 989   }
 990   _n_pauses_at_mark_end = _n_pauses;
 991   _n_marks_since_last_pause++;
 992 }
 993 
 994 void
 995 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {

 996   _should_revert_to_full_young_gcs = false;
 997   _last_full_young_gc = true;
 998   _in_marking_window = false;
 999   if (adaptive_young_list_length())
1000     calculate_young_list_target_length();

1001 }
1002 
1003 void G1CollectorPolicy::record_concurrent_pause() {
1004   if (_stop_world_start > 0.0) {
1005     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
1006     _all_yield_times_ms->add(yield_ms);
1007   }
1008 }
1009 
1010 void G1CollectorPolicy::record_concurrent_pause_end() {
1011 }
1012 
1013 template<class T>
1014 T sum_of(T* sum_arr, int start, int n, int N) {
1015   T sum = (T)0;
1016   for (int i = 0; i < n; i++) {
1017     int j = (start + i) % N;
1018     sum += sum_arr[j];
1019   }
1020   return sum;


1129 
1130 void G1CollectorPolicy::record_collection_pause_end() {
1131   double end_time_sec = os::elapsedTime();
1132   double elapsed_ms = _last_pause_time_ms;
1133   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
1134   size_t rs_size =
1135     _cur_collection_pause_used_regions_at_start - collection_set_size();
1136   size_t cur_used_bytes = _g1->used();
1137   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1138   bool last_pause_included_initial_mark = false;
1139   bool update_stats = !_g1->evacuation_failed();
1140 
1141 #ifndef PRODUCT
1142   if (G1YoungSurvRateVerbose) {
1143     gclog_or_tty->print_cr("");
1144     _short_lived_surv_rate_group->print();
1145     // do that for any other surv rate groups too
1146   }
1147 #endif // PRODUCT
1148 

1149   last_pause_included_initial_mark = during_initial_mark_pause();
1150   if (last_pause_included_initial_mark)
1151     record_concurrent_mark_init_end(0.0);
1152 
1153   size_t min_used_targ =
1154     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
1155 
1156 
1157   if (!_g1->mark_in_progress() && !_last_full_young_gc) {
1158     assert(!last_pause_included_initial_mark, "invariant");
1159     if (cur_used_bytes > min_used_targ &&
1160         cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
1161       assert(!during_initial_mark_pause(), "we should not see this here");
1162 
1163       // Note: this might have already been set, if during the last
1164       // pause we decided to start a cycle but at the beginning of
1165       // this pause we decided to postpone it. That's OK.
1166       set_initiate_conc_mark_if_possible();
1167     }
1168   }
1169 
1170   _prev_collection_pause_used_at_end_bytes = cur_used_bytes;

1171 
1172   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
1173                           end_time_sec, false);
1174 
1175   guarantee(_cur_collection_pause_used_regions_at_start >=
1176             collection_set_size(),
1177             "Negative RS size?");
1178 
1179   // This assert is exempted when we're doing parallel collection pauses,
1180   // because the fragmentation caused by the parallel GC allocation buffers
1181   // can lead to more memory being used during collection than was used
1182   // before. Best leave this out until the fragmentation problem is fixed.
1183   // Pauses in which evacuation failed can also lead to negative
1184   // collections, since no space is reclaimed from a region containing an
1185   // object whose evacuation failed.
1186   // Further, we're now always doing parallel collection.  But I'm still
1187   // leaving this here as a placeholder for a more precise assertion later.
1188   // (DLD, 10/05.)
1189   assert((true || parallel) // Always using GC LABs now.
1190          || _g1->evacuation_failed()


1421   // Reset marks-between-pauses counter.
1422   _n_marks_since_last_pause = 0;
1423 
1424   // Update the efficiency-since-mark vars.
1425   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
1426   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
1427     // This usually happens due to the timer not having the required
1428     // granularity. Some Linuxes are the usual culprits.
1429     // We'll just set it to something (arbitrarily) small.
1430     proc_ms = 1.0;
1431   }
1432   double cur_efficiency = (double) freed_bytes / proc_ms;
1433 
1434   bool new_in_marking_window = _in_marking_window;
1435   bool new_in_marking_window_im = false;
1436   if (during_initial_mark_pause()) {
1437     new_in_marking_window = true;
1438     new_in_marking_window_im = true;
1439   }
1440 

1441   if (_last_full_young_gc) {
1442     set_full_young_gcs(false);
1443     _last_full_young_gc = false;
1444   }
1445 
1446   if ( !_last_young_gc_full ) {
1447     if ( _should_revert_to_full_young_gcs ||
1448          _known_garbage_ratio < 0.05 ||
1449          (adaptive_young_list_length() &&
1450          (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
1451       set_full_young_gcs(true);
1452     }
1453   }
1454   _should_revert_to_full_young_gcs = false;
1455 
1456   if (_last_young_gc_full && !_during_marking) {
1457     _young_gc_eff_seq->add(cur_efficiency);
1458   }
1459 
1460   _short_lived_surv_rate_group->start_adding_regions();
1461   // do that for any other surv rate groups
1462 
1463   // <NEW PREDICTION>
1464 
1465   if (update_stats) {
1466     double pause_time_ms = elapsed_ms;
1467 
1468     size_t diff = 0;
1469     if (_max_pending_cards >= _pending_cards)
1470       diff = _max_pending_cards - _pending_cards;
1471     _pending_card_diff_seq->add((double) diff);
1472 
1473     double cost_per_card_ms = 0.0;
1474     if (_pending_cards > 0) {
1475       cost_per_card_ms = update_rs_time / (double) _pending_cards;
1476       _cost_per_card_ms_seq->add(cost_per_card_ms);


1862   _predicted_young_other_time_ms =
1863     predict_young_other_time_ms(_recorded_young_regions);
1864   _predicted_non_young_other_time_ms =
1865     predict_non_young_other_time_ms(_recorded_non_young_regions);
1866 
1867   _predicted_pause_time_ms =
1868     _predicted_rs_update_time_ms +
1869     _predicted_rs_scan_time_ms +
1870     _predicted_object_copy_time_ms +
1871     _predicted_constant_other_time_ms +
1872     _predicted_young_other_time_ms +
1873     _predicted_non_young_other_time_ms;
1874 #endif // PREDICTIONS_VERBOSE
1875 }
1876 
1877 void G1CollectorPolicy::check_if_region_is_too_expensive(double
1878                                                            predicted_time_ms) {
1879   // I don't think we need to do this when in young GC mode since
1880   // marking will be initiated next time we hit the soft limit anyway...
1881   if (predicted_time_ms > _expensive_region_limit_ms) {










1882     // no point in doing another partial one
1883     _should_revert_to_full_young_gcs = true;
1884   }
1885 }
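
After this change the only response to an over-budget region is to cut the
current partially-young phase short: the flag set here is consumed at the
end of the next pause (lines 1446-1454 above), which flips the policy back
to fully-young collections. A reduced sketch of that handshake; every name
is a stand-in for the corresponding field or hook:

  #include <cstdio>

  static bool should_revert_to_full_young_gcs = false;
  static bool full_young_gcs = false; // currently doing partial GCs

  static void check_region(double predicted_time_ms, double limit_ms) {
    if (predicted_time_ms > limit_ms)
      should_revert_to_full_young_gcs = true; // no point in another partial
  }

  static void record_pause_end() {
    if (should_revert_to_full_young_gcs)
      full_young_gcs = true;                  // revert at the next pause
    should_revert_to_full_young_gcs = false;
  }

  int main() {
    check_region(80.0, 50.0); // invented: region costs more than the limit
    record_pause_end();
    printf("full_young_gcs=%d\n", (int) full_young_gcs); // prints 1
    return 0;
  }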
1886 
1887 // </NEW PREDICTION>
1888 
1889 
1890 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1891                                                double elapsed_ms) {
1892   _recent_gc_times_ms->add(elapsed_ms);
1893   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1894   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1895 }
1896 
1897 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
1898   if (_recent_pause_times_ms->num() == 0) {
1899     return (double) MaxGCPauseMillis;
1900   }
1901   return _recent_pause_times_ms->avg();


2559     _g1->concurrent_mark()->registerCSetRegion(hr);
2560 
2561   assert(!hr->in_collection_set(), "should not already be in the CSet");
2562   hr->set_in_collection_set(true);
2563   hr->set_next_in_collection_set(_collection_set);
2564   _collection_set = hr;
2565   _collection_set_size++;
2566   _collection_set_bytes_used_before += hr->used();
2567   _g1->register_region_with_in_cset_fast_test(hr);
2568 }
2569 
2570 // Initialize the per-collection-set information
2571 void G1CollectorPolicy::start_incremental_cset_building() {
2572   assert(_inc_cset_build_state == Inactive, "Precondition");
2573 
2574   _inc_cset_head = NULL;
2575   _inc_cset_tail = NULL;
2576   _inc_cset_size = 0;
2577   _inc_cset_bytes_used_before = 0;
2578 

2579   _inc_cset_young_index = 0;

2580 
2581   _inc_cset_max_finger = 0;
2582   _inc_cset_recorded_young_bytes = 0;
2583   _inc_cset_recorded_rs_lengths = 0;
2584   _inc_cset_predicted_elapsed_time_ms = 0;
2585   _inc_cset_predicted_bytes_to_copy = 0;
2586   _inc_cset_build_state = Active;
2587 }
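
The build state asserted above is a simple two-state protocol: building may
only start from Inactive, and stop_incremental_cset_building() returns the
policy to Inactive once the collection set has been chosen. A hypothetical
reduction of that protocol (the assert in stop() is part of the sketch, not
the original):

  #include <cassert>

  enum CSetBuildState { Inactive, Active };
  static CSetBuildState build_state = Inactive;

  static void start_incremental_cset_building() {
    assert(build_state == Inactive); // no build may already be in flight
    // ... reset head/tail, sizes and prediction accumulators here ...
    build_state = Active;
  }

  static void stop_incremental_cset_building() {
    assert(build_state == Active);
    build_state = Inactive;
  }

  int main() {
    start_incremental_cset_building();
    stop_incremental_cset_building();
    return 0;
  }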
2588 
2589 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
2590   // This routine is used when:
2591   // * adding survivor regions to the incremental cset at the end of an
2592   //   evacuation pause,
2593   // * adding the current allocation region to the incremental cset
2594   //   when it is retired, and
2595   // * updating existing policy information for a region in the
2596   //   incremental cset via young list RSet sampling.
2597   // Therefore this routine may be called at a safepoint by the
2598   // VM thread, or in-between safepoints by mutator threads (when
2599   // retiring the current allocation region) or a concurrent


2788   }
2789 
2790   // We figure out the number of bytes available for future to-space.
2791   // For new regions without marking information, we must assume the
2792   // worst-case of complete survival.  If we have marking information for a
2793   // region, we can bound the amount of live data.  We can add a number of
2794   // such regions, as long as the sum of the live data bounds does not
2795   // exceed the available evacuation space.
2796   size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
2797 
2798   size_t expansion_bytes =
2799     _g1->expansion_regions() * HeapRegion::GrainBytes;
2800 
2801   _collection_set_bytes_used_before = 0;
2802   _collection_set_size = 0;
2803 
2804   // Adjust for expansion and slop.
2805   max_live_bytes = max_live_bytes + expansion_bytes;
2806 
2807   HeapRegion* hr;

2808   double young_start_time_sec = os::elapsedTime();
2809 
2810   if (G1PolicyVerbose > 0) {
2811     gclog_or_tty->print_cr("Adding %d young regions to the CSet",
2812                            _g1->young_list()->length());
2813   }
2814 
2815   _young_cset_length  = 0;
2816   _last_young_gc_full = full_young_gcs();
2817 
2818   if (_last_young_gc_full)
2819     ++_full_young_pause_num;
2820   else
2821     ++_partial_young_pause_num;
2822 
2823   // The young list is laid out with the survivor regions from the
2824   // previous pause appended to the RHS of the young list, i.e.
2825   //   [Newly Young Regions ++ Survivors from last pause].
2826 
2827   hr = _g1->young_list()->first_survivor_region();


2858 #if PREDICTIONS_VERBOSE
2859   set_predicted_bytes_to_copy(_inc_cset_predicted_bytes_to_copy);
2860 #endif // PREDICTIONS_VERBOSE
2861 
2862   if (G1PolicyVerbose > 0) {
2863     gclog_or_tty->print_cr("  Added " PTR_FORMAT " Young Regions to CS.",
2864                            _inc_cset_size);
2865     gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
2866                            max_live_bytes/K);
2867   }
2868 
2869   assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
2870 
2871   double young_end_time_sec = os::elapsedTime();
2872   _recorded_young_cset_choice_time_ms =
2873     (young_end_time_sec - young_start_time_sec) * 1000.0;
2874 
2875   // We are doing young collections so reset this.
2876   non_young_start_time_sec = young_end_time_sec;
2877 
2878   if (!full_young_gcs()) {








2879     bool should_continue = true;
2880     NumberSeq seq;
2881     double avg_prediction = 100000000000000000.0; // something very large
2882 
2883     do {
2884       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
2885                                                       avg_prediction);
2886       if (hr != NULL) {
2887         double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
2888         time_remaining_ms -= predicted_time_ms;
2889         predicted_pause_time_ms += predicted_time_ms;
2890         add_to_collection_set(hr);
2891         record_non_young_cset_region(hr);
2892         max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
2893         if (G1PolicyVerbose > 0) {
2894           gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
2895                         max_live_bytes/K);
2896         }
2897         seq.add(predicted_time_ms);
2898         avg_prediction = seq.avg() + seq.sd();
2899       }
2900       should_continue =
2901         ( hr != NULL) &&
2902         ( (adaptive_young_list_length()) ? time_remaining_ms > 0.0
2903           : _collection_set_size < _young_list_fixed_length );
2904     } while (should_continue);
2905 
2906     if (!adaptive_young_list_length() &&
2907         _collection_set_size < _young_list_fixed_length)
2908       _should_revert_to_full_young_gcs  = true;
2909   }
2910 

2911   stop_incremental_cset_building();
2912 
2913   count_CS_bytes_used();
2914 
2915   end_recording_regions();
2916 
2917   double non_young_end_time_sec = os::elapsedTime();
2918   _recorded_non_young_cset_choice_time_ms =
2919     (non_young_end_time_sec - non_young_start_time_sec) * 1000.0;
2920 }
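
In the partially-young branch above, each accepted region's predicted time
is charged against time_remaining_ms and fed back into a NumberSeq, so the
bar for the next candidate becomes avg_prediction = mean + one standard
deviation of the predictions so far. A self-contained sketch of that
feedback loop with fabricated per-region costs:

  #include <cmath>
  #include <cstdio>

  int main() {
    double costs[] = { 12.0, 15.0, 11.0, 30.0, 14.0 }; // invented ms costs
    double time_remaining_ms = 50.0;
    double sum = 0.0, sum_sq = 0.0;
    int n = 0;

    for (int i = 0; i < 5 && time_remaining_ms > 0.0; i++) {
      double c = costs[i];
      time_remaining_ms -= c;
      sum += c; sum_sq += c * c; n++;
      double avg = sum / n;
      double sd  = (n > 1) ? sqrt((sum_sq - n * avg * avg) / (n - 1)) : 0.0;
      printf("picked %.1f ms, remaining %.1f ms, next bar %.1f ms\n",
             c, time_remaining_ms, avg + sd);
    }
    return 0;
  }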
2921 
2922 void G1CollectorPolicy_BestRegionsFirst::record_full_collection_end() {
2923   G1CollectorPolicy::record_full_collection_end();
2924   _collectionSetChooser->updateAfterFullCollection();
2925 }
2926 
2927 void G1CollectorPolicy_BestRegionsFirst::
2928 expand_if_possible(size_t numRegions) {
2929   size_t expansion_bytes = numRegions * HeapRegion::GrainBytes;
2930   _g1->expand(expansion_bytes);