src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp

rev 2591 : 6814390: G1: remove the concept of non-generational G1
Summary: Removed the possibility to turn off generational mode for G1.
Reviewed-by:


 184   _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 185   _partially_young_cards_per_entry_ratio_seq(
 186                                          new TruncatedSeq(TruncatedSeqLength)),
 187   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 188   _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 189   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 190   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 191   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 192   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 193   _non_young_other_cost_per_region_ms_seq(
 194                                          new TruncatedSeq(TruncatedSeqLength)),
 195 
 196   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 197   _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 198   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 199 
 200   _pause_time_target_ms((double) MaxGCPauseMillis),
 201 
 202   // </NEW PREDICTION>
 203 
 204   _in_young_gc_mode(false),
 205   _full_young_gcs(true),
 206   _full_young_pause_num(0),
 207   _partial_young_pause_num(0),
 208 
 209   _during_marking(false),
 210   _in_marking_window(false),
 211   _in_marking_window_im(false),
 212 
 213   _known_garbage_ratio(0.0),
 214   _known_garbage_bytes(0),
 215 
 216   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
 217 
 218   _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
 219 
 220   _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
 221   _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
 222 
 223   _recent_avg_pause_time_ratio(0.0),
 224   _num_markings(0),


 451 
 452   size_t min_young_region_num() {
 453     return size_to_region_num(_min_gen0_size);
 454   }
 455   size_t initial_young_region_num() {
 456     return size_to_region_num(_initial_gen0_size);
 457   }
 458   size_t max_young_region_num() {
 459     return size_to_region_num(_max_gen0_size);
 460   }
 461 };
 462 
 463 void G1CollectorPolicy::init() {
 464   // Set aside an initial future to_space.
 465   _g1 = G1CollectedHeap::heap();
 466 
 467   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 468 
 469   initialize_gc_policy_counters();
 470 
 471   if (G1Gen) {
 472     _in_young_gc_mode = true;
 473 
 474     G1YoungGenSizer sizer;
 475     size_t initial_region_num = sizer.initial_young_region_num();
 476 
 477     if (UseAdaptiveSizePolicy) {
 478       set_adaptive_young_list_length(true);
 479       _young_list_fixed_length = 0;
 480     } else {
 481       set_adaptive_young_list_length(false);
 482       _young_list_fixed_length = initial_region_num;
 483     }
 484     _free_regions_at_end_of_collection = _g1->free_regions();
 485     calculate_young_list_min_length();
 486     guarantee( _young_list_min_length == 0, "invariant, not enough info" );
 487     calculate_young_list_target_length();
 488   } else {
 489     _young_list_fixed_length = 0;
 490     _in_young_gc_mode = false;
 491   }
 492 
 493   // We may immediately start allocating regions and placing them on the
 494   // collection set list. Initialize the per-collection set info
 495   start_incremental_cset_building();
 496 }
 497 
 498 // Create the jstat counters for the policy.
 499 void G1CollectorPolicy::initialize_gc_policy_counters()
 500 {
 501   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 2 + G1Gen);
 502 }
 503 
 504 void G1CollectorPolicy::calculate_young_list_min_length() {
 505   _young_list_min_length = 0;
 506 
 507   if (!adaptive_young_list_length())
 508     return;
 509 
 510   if (_alloc_rate_ms_seq->num() > 3) {
 511     double now_sec = os::elapsedTime();
 512     double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 513     double alloc_rate_ms = predict_alloc_rate_ms();
 514     size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
 515     size_t current_region_num = _g1->young_list()->length();
 516     _young_list_min_length = min_regions + current_region_num;
 517   }
 518 }
 519 
 520 void G1CollectorPolicy::calculate_young_list_target_length() {
 521   if (adaptive_young_list_length()) {


 851 
 852   _prev_region_num_young   = _region_num_young;
 853   _prev_region_num_tenured = _region_num_tenured;
 854 
 855   _free_regions_at_end_of_collection = _g1->free_regions();
 856   // Reset survivors SurvRateGroup.
 857   _survivor_surv_rate_group->reset();
 858   calculate_young_list_min_length();
 859   calculate_young_list_target_length();
 860 }
 861 
 862 void G1CollectorPolicy::record_stop_world_start() {
 863   _stop_world_start = os::elapsedTime();
 864 }
 865 
 866 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
 867                                                       size_t start_used) {
 868   if (PrintGCDetails) {
 869     gclog_or_tty->stamp(PrintGCTimeStamps);
 870     gclog_or_tty->print("[GC pause");
 871     if (in_young_gc_mode())
 872       gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
 873   }
 874 
 875   assert(_g1->used() == _g1->recalculate_used(),
 876          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
 877                  _g1->used(), _g1->recalculate_used()));
 878 
 879   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 880   _all_stop_world_times_ms->add(s_w_t_ms);
 881   _stop_world_start = 0.0;
 882 
 883   _cur_collection_start_sec = start_time_sec;
 884   _cur_collection_pause_used_at_start_bytes = start_used;
 885   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
 886   _pending_cards = _g1->pending_card_num();
 887   _max_pending_cards = _g1->max_pending_card_num();
 888 
 889   _bytes_in_collection_set_before_gc = 0;
 890   _bytes_copied_during_gc = 0;
 891 


 904     _par_last_mark_stack_scan_times_ms[i] = -1234.0;
 905     _par_last_update_rs_times_ms[i] = -1234.0;
 906     _par_last_update_rs_processed_buffers[i] = -1234.0;
 907     _par_last_scan_rs_times_ms[i] = -1234.0;
 908     _par_last_obj_copy_times_ms[i] = -1234.0;
 909     _par_last_termination_times_ms[i] = -1234.0;
 910     _par_last_termination_attempts[i] = -1234.0;
 911     _par_last_gc_worker_end_times_ms[i] = -1234.0;
 912     _par_last_gc_worker_times_ms[i] = -1234.0;
 913   }
 914 #endif
 915 
 916   for (int i = 0; i < _aux_num; ++i) {
 917     _cur_aux_times_ms[i] = 0.0;
 918     _cur_aux_times_set[i] = false;
 919   }
 920 
 921   _satb_drain_time_set = false;
 922   _last_satb_drain_processed_buffers = -1;
 923 
 924   if (in_young_gc_mode())
 925     _last_young_gc_full = false;
 926 
 927   // do that for any other surv rate groups
 928   _short_lived_surv_rate_group->stop_adding_regions();
 929   _survivors_age_table.clear();
 930 
 931   assert( verify_young_ages(), "region age verification" );
 932 }
 933 
 934 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
 935   _mark_closure_time_ms = mark_closure_time_ms;
 936 }
 937 
 938 void G1CollectorPolicy::record_concurrent_mark_init_start() {
 939   _mark_init_start_sec = os::elapsedTime();
 940   guarantee(!in_young_gc_mode(), "should not be here in young GC mode");
 941 }
 942 
 943 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
 944                                                    mark_init_elapsed_time_ms) {
 945   _during_marking = true;
 946   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
 947   clear_during_initial_mark_pause();
 948   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 949 }
 950 
 951 void G1CollectorPolicy::record_concurrent_mark_init_end() {
 952   double end_time_sec = os::elapsedTime();
 953   double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
 954   _concurrent_mark_init_times_ms->add(elapsed_time_ms);
 955   record_concurrent_mark_init_end_pre(elapsed_time_ms);
 956 
 957   _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
 958 }
 959 
 960 void G1CollectorPolicy::record_concurrent_mark_remark_start() {


1002   _cur_mark_stop_world_time_ms += elapsed_time_ms;
1003   _prev_collection_pause_end_ms += elapsed_time_ms;
1004 
1005   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
1006 
1007   _num_markings++;
1008 
1009   // We did a marking, so reset the "since_last_mark" variables.
1010   double considerConcMarkCost = 1.0;
1011   // If there are available processors, concurrent activity is free...
1012   if (Threads::number_of_non_daemon_threads() * 2 <
1013       os::active_processor_count()) {
1014     considerConcMarkCost = 0.0;
1015   }
1016   _n_pauses_at_mark_end = _n_pauses;
1017   _n_marks_since_last_pause++;
1018 }
1019 
1020 void
1021 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
1022   if (in_young_gc_mode()) {
1023     _should_revert_to_full_young_gcs = false;
1024     _last_full_young_gc = true;
1025     _in_marking_window = false;
1026     if (adaptive_young_list_length())
1027       calculate_young_list_target_length();
1028   }
1029 }
1030 
1031 void G1CollectorPolicy::record_concurrent_pause() {
1032   if (_stop_world_start > 0.0) {
1033     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
1034     _all_yield_times_ms->add(yield_ms);
1035   }
1036 }
1037 
1038 void G1CollectorPolicy::record_concurrent_pause_end() {
1039 }
1040 
1041 template<class T>
1042 T sum_of(T* sum_arr, int start, int n, int N) {
1043   T sum = (T)0;
1044   for (int i = 0; i < n; i++) {
1045     int j = (start + i) % N;
1046     sum += sum_arr[j];
1047   }
1048   return sum;


1157 
1158 void G1CollectorPolicy::record_collection_pause_end() {
1159   double end_time_sec = os::elapsedTime();
1160   double elapsed_ms = _last_pause_time_ms;
1161   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
1162   size_t rs_size =
1163     _cur_collection_pause_used_regions_at_start - collection_set_size();
1164   size_t cur_used_bytes = _g1->used();
1165   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1166   bool last_pause_included_initial_mark = false;
1167   bool update_stats = !_g1->evacuation_failed();
1168 
1169 #ifndef PRODUCT
1170   if (G1YoungSurvRateVerbose) {
1171     gclog_or_tty->print_cr("");
1172     _short_lived_surv_rate_group->print();
1173     // do that for any other surv rate groups too
1174   }
1175 #endif // PRODUCT
1176 
1177   if (in_young_gc_mode()) {
1178     last_pause_included_initial_mark = during_initial_mark_pause();
1179     if (last_pause_included_initial_mark)
1180       record_concurrent_mark_init_end_pre(0.0);
1181 
1182     size_t min_used_targ =
1183       (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
1184 
1185 
1186     if (!_g1->mark_in_progress() && !_last_full_young_gc) {
1187       assert(!last_pause_included_initial_mark, "invariant");
1188       if (cur_used_bytes > min_used_targ &&
1189           cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
1190         assert(!during_initial_mark_pause(), "we should not see this here");
1191 
1192         // Note: this might have already been set, if during the last
1193         // pause we decided to start a cycle but at the beginning of
1194         // this pause we decided to postpone it. That's OK.
1195         set_initiate_conc_mark_if_possible();
1196       }
1197     }
1198 
1199     _prev_collection_pause_used_at_end_bytes = cur_used_bytes;
1200   }
1201 
1202   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
1203                           end_time_sec, false);
1204 
1205   guarantee(_cur_collection_pause_used_regions_at_start >=
1206             collection_set_size(),
1207             "Negative RS size?");
1208 
1209   // This assert is exempted when we're doing parallel collection pauses,
1210   // because the fragmentation caused by the parallel GC allocation buffers
1211   // can lead to more memory being used during collection than was used
1212   // before. Best leave this out until the fragmentation problem is fixed.
1213   // Pauses in which evacuation failed can also lead to negative
1214   // collections, since no space is reclaimed from a region containing an
1215   // object whose evacuation failed.
1216   // Further, we're now always doing parallel collection.  But I'm still
1217   // leaving this here as a placeholder for a more precise assertion later.
1218   // (DLD, 10/05.)
1219   assert((true || parallel) // Always using GC LABs now.
1220          || _g1->evacuation_failed()


1451   // Reset marks-between-pauses counter.
1452   _n_marks_since_last_pause = 0;
1453 
1454   // Update the efficiency-since-mark vars.
1455   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
1456   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
1457     // This usually happens due to the timer not having the required
1458     // granularity. Some Linuxes are the usual culprits.
1459     // We'll just set it to something (arbitrarily) small.
1460     proc_ms = 1.0;
1461   }
1462   double cur_efficiency = (double) freed_bytes / proc_ms;
1463 
1464   bool new_in_marking_window = _in_marking_window;
1465   bool new_in_marking_window_im = false;
1466   if (during_initial_mark_pause()) {
1467     new_in_marking_window = true;
1468     new_in_marking_window_im = true;
1469   }
1470 
1471   if (in_young_gc_mode()) {
1472     if (_last_full_young_gc) {
1473       set_full_young_gcs(false);
1474       _last_full_young_gc = false;
1475     }
1476 
1477     if ( !_last_young_gc_full ) {
1478       if ( _should_revert_to_full_young_gcs ||
1479            _known_garbage_ratio < 0.05 ||
1480            (adaptive_young_list_length() &&
1481            (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
1482         set_full_young_gcs(true);
1483       }
1484     }
1485     _should_revert_to_full_young_gcs = false;
1486 
1487     if (_last_young_gc_full && !_during_marking)
1488       _young_gc_eff_seq->add(cur_efficiency);
1489   }
1490 
1491   _short_lived_surv_rate_group->start_adding_regions();
1492   // do that for any other surv rate groups
1493 
1494   // <NEW PREDICTION>
1495 
1496   if (update_stats) {
1497     double pause_time_ms = elapsed_ms;
1498 
1499     size_t diff = 0;
1500     if (_max_pending_cards >= _pending_cards)
1501       diff = _max_pending_cards - _pending_cards;
1502     _pending_card_diff_seq->add((double) diff);
1503 
1504     double cost_per_card_ms = 0.0;
1505     if (_pending_cards > 0) {
1506       cost_per_card_ms = update_rs_time / (double) _pending_cards;
1507       _cost_per_card_ms_seq->add(cost_per_card_ms);


1893   _predicted_young_other_time_ms =
1894     predict_young_other_time_ms(_recorded_young_regions);
1895   _predicted_non_young_other_time_ms =
1896     predict_non_young_other_time_ms(_recorded_non_young_regions);
1897 
1898   _predicted_pause_time_ms =
1899     _predicted_rs_update_time_ms +
1900     _predicted_rs_scan_time_ms +
1901     _predicted_object_copy_time_ms +
1902     _predicted_constant_other_time_ms +
1903     _predicted_young_other_time_ms +
1904     _predicted_non_young_other_time_ms;
1905 #endif // PREDICTIONS_VERBOSE
1906 }
1907 
1908 void G1CollectorPolicy::check_if_region_is_too_expensive(double
1909                                                            predicted_time_ms) {
1910   // I don't think we need to do this when in young GC mode since
1911   // marking will be initiated next time we hit the soft limit anyway...
1912   if (predicted_time_ms > _expensive_region_limit_ms) {
1913     if (!in_young_gc_mode()) {
1914         set_full_young_gcs(true);
1915         // We might want to do something different here. However,
1916         // right now we don't support the non-generational G1 mode
1917         // (and in fact we are planning to remove the associated code,
1918         // see CR 6814390). So, let's leave it as is and this will be
1919         // removed some time in the future
1920         ShouldNotReachHere();
1921         set_during_initial_mark_pause();
1922     } else
1923       // no point in doing another partial one
1924       _should_revert_to_full_young_gcs = true;
1925   }
1926 }
1927 
1928 // </NEW PREDICTION>
1929 
1930 
1931 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1932                                                double elapsed_ms) {
1933   _recent_gc_times_ms->add(elapsed_ms);
1934   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1935   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1936 }
1937 
1938 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
1939   if (_recent_pause_times_ms->num() == 0) {
1940     return (double) MaxGCPauseMillis;
1941   }
1942   return _recent_pause_times_ms->avg();


2600     _g1->concurrent_mark()->registerCSetRegion(hr);
2601 
2602   assert(!hr->in_collection_set(), "should not already be in the CSet");
2603   hr->set_in_collection_set(true);
2604   hr->set_next_in_collection_set(_collection_set);
2605   _collection_set = hr;
2606   _collection_set_size++;
2607   _collection_set_bytes_used_before += hr->used();
2608   _g1->register_region_with_in_cset_fast_test(hr);
2609 }
2610 
2611 // Initialize the per-collection-set information
2612 void G1CollectorPolicy::start_incremental_cset_building() {
2613   assert(_inc_cset_build_state == Inactive, "Precondition");
2614 
2615   _inc_cset_head = NULL;
2616   _inc_cset_tail = NULL;
2617   _inc_cset_size = 0;
2618   _inc_cset_bytes_used_before = 0;
2619 
2620   if (in_young_gc_mode()) {
2621     _inc_cset_young_index = 0;
2622   }
2623 
2624   _inc_cset_max_finger = 0;
2625   _inc_cset_recorded_young_bytes = 0;
2626   _inc_cset_recorded_rs_lengths = 0;
2627   _inc_cset_predicted_elapsed_time_ms = 0;
2628   _inc_cset_predicted_bytes_to_copy = 0;
2629   _inc_cset_build_state = Active;
2630 }
2631 
2632 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
2633   // This routine is used when:
2634   // * adding survivor regions to the incremental cset at the end of an
2635   //   evacuation pause,
2636   // * adding the current allocation region to the incremental cset
2637   //   when it is retired, and
2638   // * updating existing policy information for a region in the
2639   //   incremental cset via young list RSet sampling.
2640   // Therefore this routine may be called at a safepoint by the
2641   // VM thread, or in-between safepoints by mutator threads (when
2642   // retiring the current allocation region) or a concurrent


2831   }
2832 
2833   // We figure out the number of bytes available for future to-space.
2834   // For new regions without marking information, we must assume the
2835   // worst-case of complete survival.  If we have marking information for a
2836   // region, we can bound the amount of live data.  We can add a number of
2837   // such regions, as long as the sum of the live data bounds does not
2838   // exceed the available evacuation space.
2839   size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
2840 
2841   size_t expansion_bytes =
2842     _g1->expansion_regions() * HeapRegion::GrainBytes;
2843 
2844   _collection_set_bytes_used_before = 0;
2845   _collection_set_size = 0;
2846 
2847   // Adjust for expansion and slop.
2848   max_live_bytes = max_live_bytes + expansion_bytes;
2849 
2850   HeapRegion* hr;
2851   if (in_young_gc_mode()) {
2852     double young_start_time_sec = os::elapsedTime();
2853 
2854     if (G1PolicyVerbose > 0) {
2855       gclog_or_tty->print_cr("Adding %d young regions to the CSet",
2856                     _g1->young_list()->length());
2857     }
2858 
2859     _young_cset_length  = 0;
2860     _last_young_gc_full = full_young_gcs();
2861 
2862     if (_last_young_gc_full)
2863       ++_full_young_pause_num;
2864     else
2865       ++_partial_young_pause_num;
2866 
2867     // The young list is laid out with the survivor regions from the
2868     // previous pause appended to the RHS of the young list, i.e.
2869     //   [Newly Young Regions ++ Survivors from last pause].
2870 
2871     hr = _g1->young_list()->first_survivor_region();


2908                              _inc_cset_size);
2909       gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
2910                             max_live_bytes/K);
2911     }
2912 
2913     assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
2914 
2915     double young_end_time_sec = os::elapsedTime();
2916     _recorded_young_cset_choice_time_ms =
2917       (young_end_time_sec - young_start_time_sec) * 1000.0;
2918 
2919     // We are doing young collections so reset this.
2920     non_young_start_time_sec = young_end_time_sec;
2921 
2922     // Note we can use either _collection_set_size or
2923     // _young_cset_length here
2924     if (_collection_set_size > 0 && _last_young_gc_full) {
2925       // don't bother adding more regions...
2926       goto choose_collection_set_end;
2927     }
2928   }
2929 
2930   if (!in_young_gc_mode() || !full_young_gcs()) {
2931     bool should_continue = true;
2932     NumberSeq seq;
2933     double avg_prediction = 100000000000000000.0; // something very large
2934 
2935     do {
2936       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
2937                                                       avg_prediction);
2938       if (hr != NULL) {
2939         double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
2940         time_remaining_ms -= predicted_time_ms;
2941         predicted_pause_time_ms += predicted_time_ms;
2942         add_to_collection_set(hr);
2943         record_non_young_cset_region(hr);
2944         max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
2945         if (G1PolicyVerbose > 0) {
2946           gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
2947                         max_live_bytes/K);
2948         }
2949         seq.add(predicted_time_ms);
2950         avg_prediction = seq.avg() + seq.sd();




 184   _fully_young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
 185   _partially_young_cards_per_entry_ratio_seq(
 186                                          new TruncatedSeq(TruncatedSeqLength)),
 187   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 188   _partially_young_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 189   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 190   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 191   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 192   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 193   _non_young_other_cost_per_region_ms_seq(
 194                                          new TruncatedSeq(TruncatedSeqLength)),
 195 
 196   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 197   _scanned_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 198   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 199 
 200   _pause_time_target_ms((double) MaxGCPauseMillis),
 201 
 202   // </NEW PREDICTION>
 203 

 204   _full_young_gcs(true),
 205   _full_young_pause_num(0),
 206   _partial_young_pause_num(0),
 207 
 208   _during_marking(false),
 209   _in_marking_window(false),
 210   _in_marking_window_im(false),
 211 
 212   _known_garbage_ratio(0.0),
 213   _known_garbage_bytes(0),
 214 
 215   _young_gc_eff_seq(new TruncatedSeq(TruncatedSeqLength)),
 216 
 217   _recent_prev_end_times_for_all_gcs_sec(new TruncatedSeq(NumPrevPausesForHeuristics)),
 218 
 219   _recent_CS_bytes_used_before(new TruncatedSeq(NumPrevPausesForHeuristics)),
 220   _recent_CS_bytes_surviving(new TruncatedSeq(NumPrevPausesForHeuristics)),
 221 
 222   _recent_avg_pause_time_ratio(0.0),
 223   _num_markings(0),


 450 
 451   size_t min_young_region_num() {
 452     return size_to_region_num(_min_gen0_size);
 453   }
 454   size_t initial_young_region_num() {
 455     return size_to_region_num(_initial_gen0_size);
 456   }
 457   size_t max_young_region_num() {
 458     return size_to_region_num(_max_gen0_size);
 459   }
 460 };
 461 
 462 void G1CollectorPolicy::init() {
 463   // Set aside an initial future to_space.
 464   _g1 = G1CollectedHeap::heap();
 465 
 466   assert(Heap_lock->owned_by_self(), "Locking discipline.");
 467 
 468   initialize_gc_policy_counters();
 469 



 470   G1YoungGenSizer sizer;
 471   size_t initial_region_num = sizer.initial_young_region_num();
 472 
 473   if (UseAdaptiveSizePolicy) {
 474     set_adaptive_young_list_length(true);
 475     _young_list_fixed_length = 0;
 476   } else {
 477     set_adaptive_young_list_length(false);
 478     _young_list_fixed_length = initial_region_num;
 479   }
 480   _free_regions_at_end_of_collection = _g1->free_regions();
 481   calculate_young_list_min_length();
 482   guarantee( _young_list_min_length == 0, "invariant, not enough info" );
 483   calculate_young_list_target_length();




 484 
 485   // We may immediately start allocating regions and placing them on the
 486   // collection set list. Initialize the per-collection set info
 487   start_incremental_cset_building();
 488 }
 489 
 490 // Create the jstat counters for the policy.
 491 void G1CollectorPolicy::initialize_gc_policy_counters()
 492 {
 493   _gc_policy_counters = new GCPolicyCounters("GarbageFirst", 1, 3);
 494 }
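
[Reviewer note, not part of the patch] The new generation count is just the old expression constant-folded: this previously read 2 + G1Gen, which evaluated to 2 + 1 = 3 whenever G1Gen was true; with the non-generational mode gone, generational behavior is unconditional and the literal 3 can be used directly.
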
 495 
 496 void G1CollectorPolicy::calculate_young_list_min_length() {
 497   _young_list_min_length = 0;
 498 
 499   if (!adaptive_young_list_length())
 500     return;
 501 
 502   if (_alloc_rate_ms_seq->num() > 3) {
 503     double now_sec = os::elapsedTime();
 504     double when_ms = _mmu_tracker->when_max_gc_sec(now_sec) * 1000.0;
 505     double alloc_rate_ms = predict_alloc_rate_ms();
 506     size_t min_regions = (size_t) ceil(alloc_rate_ms * when_ms);
 507     size_t current_region_num = _g1->young_list()->length();
 508     _young_list_min_length = min_regions + current_region_num;
 509   }
 510 }
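
[Reviewer note, not part of the patch] To make the sizing arithmetic above concrete, a minimal standalone sketch with made-up inputs (in the real code they come from the _alloc_rate_ms_seq predictor and the MMU tracker):

    #include <cmath>
    #include <cstddef>
    #include <cstdio>

    int main() {
      double alloc_rate_ms      = 0.25;  // predicted allocation rate, regions/ms (made up)
      double when_ms            = 180.0; // MMU tracker: next GC allowed 180 ms out (made up)
      size_t current_region_num = 5;     // regions already in the young list (made up)
      // Same arithmetic as calculate_young_list_min_length():
      size_t min_regions = (size_t) std::ceil(alloc_rate_ms * when_ms); // ceil(45.0) == 45
      std::printf("young_list_min_length = %zu\n",
                  min_regions + current_region_num);                    // prints 50
      return 0;
    }
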
 511 
 512 void G1CollectorPolicy::calculate_young_list_target_length() {
 513   if (adaptive_young_list_length()) {


 843 
 844   _prev_region_num_young   = _region_num_young;
 845   _prev_region_num_tenured = _region_num_tenured;
 846 
 847   _free_regions_at_end_of_collection = _g1->free_regions();
 848   // Reset survivors SurvRateGroup.
 849   _survivor_surv_rate_group->reset();
 850   calculate_young_list_min_length();
 851   calculate_young_list_target_length();
 852 }
 853 
 854 void G1CollectorPolicy::record_stop_world_start() {
 855   _stop_world_start = os::elapsedTime();
 856 }
 857 
 858 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec,
 859                                                       size_t start_used) {
 860   if (PrintGCDetails) {
 861     gclog_or_tty->stamp(PrintGCTimeStamps);
 862     gclog_or_tty->print("[GC pause");

 863     gclog_or_tty->print(" (%s)", full_young_gcs() ? "young" : "partial");
 864   }
 865 
 866   assert(_g1->used() == _g1->recalculate_used(),
 867          err_msg("sanity, used: "SIZE_FORMAT" recalculate_used: "SIZE_FORMAT,
 868                  _g1->used(), _g1->recalculate_used()));
 869 
 870   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 871   _all_stop_world_times_ms->add(s_w_t_ms);
 872   _stop_world_start = 0.0;
 873 
 874   _cur_collection_start_sec = start_time_sec;
 875   _cur_collection_pause_used_at_start_bytes = start_used;
 876   _cur_collection_pause_used_regions_at_start = _g1->used_regions();
 877   _pending_cards = _g1->pending_card_num();
 878   _max_pending_cards = _g1->max_pending_card_num();
 879 
 880   _bytes_in_collection_set_before_gc = 0;
 881   _bytes_copied_during_gc = 0;
 882 


 895     _par_last_mark_stack_scan_times_ms[i] = -1234.0;
 896     _par_last_update_rs_times_ms[i] = -1234.0;
 897     _par_last_update_rs_processed_buffers[i] = -1234.0;
 898     _par_last_scan_rs_times_ms[i] = -1234.0;
 899     _par_last_obj_copy_times_ms[i] = -1234.0;
 900     _par_last_termination_times_ms[i] = -1234.0;
 901     _par_last_termination_attempts[i] = -1234.0;
 902     _par_last_gc_worker_end_times_ms[i] = -1234.0;
 903     _par_last_gc_worker_times_ms[i] = -1234.0;
 904   }
 905 #endif
 906 
 907   for (int i = 0; i < _aux_num; ++i) {
 908     _cur_aux_times_ms[i] = 0.0;
 909     _cur_aux_times_set[i] = false;
 910   }
 911 
 912   _satb_drain_time_set = false;
 913   _last_satb_drain_processed_buffers = -1;
 914 

 915   _last_young_gc_full = false;
 916 
 917   // do that for any other surv rate groups
 918   _short_lived_surv_rate_group->stop_adding_regions();
 919   _survivors_age_table.clear();
 920 
 921   assert( verify_young_ages(), "region age verification" );
 922 }
 923 
 924 void G1CollectorPolicy::record_mark_closure_time(double mark_closure_time_ms) {
 925   _mark_closure_time_ms = mark_closure_time_ms;
 926 }
 927 
 928 void G1CollectorPolicy::record_concurrent_mark_init_start() {
 929   _mark_init_start_sec = os::elapsedTime();

 930 }
 931 
 932 void G1CollectorPolicy::record_concurrent_mark_init_end_pre(double
 933                                                    mark_init_elapsed_time_ms) {
 934   _during_marking = true;
 935   assert(!initiate_conc_mark_if_possible(), "we should have cleared it by now");
 936   clear_during_initial_mark_pause();
 937   _cur_mark_stop_world_time_ms = mark_init_elapsed_time_ms;
 938 }
 939 
 940 void G1CollectorPolicy::record_concurrent_mark_init_end() {
 941   double end_time_sec = os::elapsedTime();
 942   double elapsed_time_ms = (end_time_sec - _mark_init_start_sec) * 1000.0;
 943   _concurrent_mark_init_times_ms->add(elapsed_time_ms);
 944   record_concurrent_mark_init_end_pre(elapsed_time_ms);
 945 
 946   _mmu_tracker->add_pause(_mark_init_start_sec, end_time_sec, true);
 947 }
 948 
 949 void G1CollectorPolicy::record_concurrent_mark_remark_start() {


 991   _cur_mark_stop_world_time_ms += elapsed_time_ms;
 992   _prev_collection_pause_end_ms += elapsed_time_ms;
 993 
 994   _mmu_tracker->add_pause(_mark_cleanup_start_sec, end_time_sec, true);
 995 
 996   _num_markings++;
 997 
 998   // We did a marking, so reset the "since_last_mark" variables.
 999   double considerConcMarkCost = 1.0;
1000   // If there are available processors, concurrent activity is free...
1001   if (Threads::number_of_non_daemon_threads() * 2 <
1002       os::active_processor_count()) {
1003     considerConcMarkCost = 0.0;
1004   }
1005   _n_pauses_at_mark_end = _n_pauses;
1006   _n_marks_since_last_pause++;
1007 }
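
[Reviewer note, not part of the patch] The heuristic above treats concurrent marking as free only when the machine has headroom. Worked with illustrative numbers: 4 non-daemon threads and 16 active processors give 4 * 2 = 8 < 16, so considerConcMarkCost drops from 1.0 to 0.0. Also note that within this hunk the local is computed but never read afterwards.
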
1008 
1009 void
1010 G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {

1011   _should_revert_to_full_young_gcs = false;
1012   _last_full_young_gc = true;
1013   _in_marking_window = false;
1014   if (adaptive_young_list_length())
1015     calculate_young_list_target_length();

1016 }
1017 
1018 void G1CollectorPolicy::record_concurrent_pause() {
1019   if (_stop_world_start > 0.0) {
1020     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
1021     _all_yield_times_ms->add(yield_ms);
1022   }
1023 }
1024 
1025 void G1CollectorPolicy::record_concurrent_pause_end() {
1026 }
1027 
1028 template<class T>
1029 T sum_of(T* sum_arr, int start, int n, int N) {
1030   T sum = (T)0;
1031   for (int i = 0; i < n; i++) {
1032     int j = (start + i) % N;
1033     sum += sum_arr[j];
1034   }
1035   return sum;
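
[Reviewer note, not part of the patch] sum_of() adds up n entries of a circular buffer of capacity N, starting at index start and wrapping modulo N. A hand-worked call:

      double buf[4] = { 1.0, 2.0, 3.0, 4.0 };
      // indices visited: (2+0)%4 = 2, (2+1)%4 = 3, (2+2)%4 = 0,
      // so the call returns 3.0 + 4.0 + 1.0 == 8.0
      double s = sum_of(buf, 2, 3, 4);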


1144 
1145 void G1CollectorPolicy::record_collection_pause_end() {
1146   double end_time_sec = os::elapsedTime();
1147   double elapsed_ms = _last_pause_time_ms;
1148   bool parallel = G1CollectedHeap::use_parallel_gc_threads();
1149   size_t rs_size =
1150     _cur_collection_pause_used_regions_at_start - collection_set_size();
1151   size_t cur_used_bytes = _g1->used();
1152   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1153   bool last_pause_included_initial_mark = false;
1154   bool update_stats = !_g1->evacuation_failed();
1155 
1156 #ifndef PRODUCT
1157   if (G1YoungSurvRateVerbose) {
1158     gclog_or_tty->print_cr("");
1159     _short_lived_surv_rate_group->print();
1160     // do that for any other surv rate groups too
1161   }
1162 #endif // PRODUCT
1163 

1164   last_pause_included_initial_mark = during_initial_mark_pause();
1165   if (last_pause_included_initial_mark)
1166     record_concurrent_mark_init_end_pre(0.0);
1167 
1168   size_t min_used_targ =
1169     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
1170 
1171 
1172   if (!_g1->mark_in_progress() && !_last_full_young_gc) {
1173     assert(!last_pause_included_initial_mark, "invariant");
1174     if (cur_used_bytes > min_used_targ &&
1175         cur_used_bytes > _prev_collection_pause_used_at_end_bytes) {
1176       assert(!during_initial_mark_pause(), "we should not see this here");
1177 
1178       // Note: this might have already been set, if during the last
1179       // pause we decided to start a cycle but at the beginning of
1180       // this pause we decided to postpone it. That's OK.
1181       set_initiate_conc_mark_if_possible();
1182     }
1183   }
1184 
1185   _prev_collection_pause_used_at_end_bytes = cur_used_bytes;

1186 
1187   _mmu_tracker->add_pause(end_time_sec - elapsed_ms/1000.0,
1188                           end_time_sec, false);
1189 
1190   guarantee(_cur_collection_pause_used_regions_at_start >=
1191             collection_set_size(),
1192             "Negative RS size?");
1193 
1194   // This assert is exempted when we're doing parallel collection pauses,
1195   // because the fragmentation caused by the parallel GC allocation buffers
1196   // can lead to more memory being used during collection than was used
1197   // before. Best leave this out until the fragmentation problem is fixed.
1198   // Pauses in which evacuation failed can also lead to negative
1199   // collections, since no space is reclaimed from a region containing an
1200   // object whose evacuation failed.
1201   // Further, we're now always doing parallel collection.  But I'm still
1202   // leaving this here as a placeholder for a more precise assertion later.
1203   // (DLD, 10/05.)
1204   assert((true || parallel) // Always using GC LABs now.
1205          || _g1->evacuation_failed()


1436   // Reset marks-between-pauses counter.
1437   _n_marks_since_last_pause = 0;
1438 
1439   // Update the efficiency-since-mark vars.
1440   double proc_ms = elapsed_ms * (double) _parallel_gc_threads;
1441   if (elapsed_ms < MIN_TIMER_GRANULARITY) {
1442     // This usually happens due to the timer not having the required
1443     // granularity. Some Linuxes are the usual culprits.
1444     // We'll just set it to something (arbitrarily) small.
1445     proc_ms = 1.0;
1446   }
1447   double cur_efficiency = (double) freed_bytes / proc_ms;
1448 
1449   bool new_in_marking_window = _in_marking_window;
1450   bool new_in_marking_window_im = false;
1451   if (during_initial_mark_pause()) {
1452     new_in_marking_window = true;
1453     new_in_marking_window_im = true;
1454   }
1455 

1456   if (_last_full_young_gc) {
1457     set_full_young_gcs(false);
1458     _last_full_young_gc = false;
1459   }
1460 
1461   if ( !_last_young_gc_full ) {
1462     if ( _should_revert_to_full_young_gcs ||
1463          _known_garbage_ratio < 0.05 ||
1464          (adaptive_young_list_length() &&
1465          (get_gc_eff_factor() * cur_efficiency < predict_young_gc_eff())) ) {
1466       set_full_young_gcs(true);
1467     }
1468   }
1469   _should_revert_to_full_young_gcs = false;
1470 
1471   if (_last_young_gc_full && !_during_marking) {
1472     _young_gc_eff_seq->add(cur_efficiency);
1473   }
1474 
1475   _short_lived_surv_rate_group->start_adding_regions();
1476   // do that for any other surv rate groups
1477 
1478   // <NEW PREDICTION>
1479 
1480   if (update_stats) {
1481     double pause_time_ms = elapsed_ms;
1482 
1483     size_t diff = 0;
1484     if (_max_pending_cards >= _pending_cards)
1485       diff = _max_pending_cards - _pending_cards;
1486     _pending_card_diff_seq->add((double) diff);
1487 
1488     double cost_per_card_ms = 0.0;
1489     if (_pending_cards > 0) {
1490       cost_per_card_ms = update_rs_time / (double) _pending_cards;
1491       _cost_per_card_ms_seq->add(cost_per_card_ms);


1877   _predicted_young_other_time_ms =
1878     predict_young_other_time_ms(_recorded_young_regions);
1879   _predicted_non_young_other_time_ms =
1880     predict_non_young_other_time_ms(_recorded_non_young_regions);
1881 
1882   _predicted_pause_time_ms =
1883     _predicted_rs_update_time_ms +
1884     _predicted_rs_scan_time_ms +
1885     _predicted_object_copy_time_ms +
1886     _predicted_constant_other_time_ms +
1887     _predicted_young_other_time_ms +
1888     _predicted_non_young_other_time_ms;
1889 #endif // PREDICTIONS_VERBOSE
1890 }
1891 
1892 void G1CollectorPolicy::check_if_region_is_too_expensive(double
1893                                                            predicted_time_ms) {
1894   // I don't think we need to do this when in young GC mode since
1895   // marking will be initiated next time we hit the soft limit anyway...
1896   if (predicted_time_ms > _expensive_region_limit_ms) {










1897     // no point in doing another partial one
1898     _should_revert_to_full_young_gcs = true;
1899   }
1900 }
1901 
1902 // </NEW PREDICTION>
1903 
1904 
1905 void G1CollectorPolicy::update_recent_gc_times(double end_time_sec,
1906                                                double elapsed_ms) {
1907   _recent_gc_times_ms->add(elapsed_ms);
1908   _recent_prev_end_times_for_all_gcs_sec->add(end_time_sec);
1909   _prev_collection_pause_end_ms = end_time_sec * 1000.0;
1910 }
1911 
1912 double G1CollectorPolicy::recent_avg_time_for_pauses_ms() {
1913   if (_recent_pause_times_ms->num() == 0) {
1914     return (double) MaxGCPauseMillis;
1915   }
1916   return _recent_pause_times_ms->avg();


2574     _g1->concurrent_mark()->registerCSetRegion(hr);
2575 
2576   assert(!hr->in_collection_set(), "should not already be in the CSet");
2577   hr->set_in_collection_set(true);
2578   hr->set_next_in_collection_set(_collection_set);
2579   _collection_set = hr;
2580   _collection_set_size++;
2581   _collection_set_bytes_used_before += hr->used();
2582   _g1->register_region_with_in_cset_fast_test(hr);
2583 }
2584 
2585 // Initialize the per-collection-set information
2586 void G1CollectorPolicy::start_incremental_cset_building() {
2587   assert(_inc_cset_build_state == Inactive, "Precondition");
2588 
2589   _inc_cset_head = NULL;
2590   _inc_cset_tail = NULL;
2591   _inc_cset_size = 0;
2592   _inc_cset_bytes_used_before = 0;
2593 

2594   _inc_cset_young_index = 0;

2595 
2596   _inc_cset_max_finger = 0;
2597   _inc_cset_recorded_young_bytes = 0;
2598   _inc_cset_recorded_rs_lengths = 0;
2599   _inc_cset_predicted_elapsed_time_ms = 0;
2600   _inc_cset_predicted_bytes_to_copy = 0;
2601   _inc_cset_build_state = Active;
2602 }
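
[Reviewer note, not part of the patch] The Inactive precondition pins down the lifecycle implied by this function: a new incremental build may only start once the previous one has been wound down, and init() (line 487 in the new version) starts the first build right away so that regions retired before the first pause can be added. Sketch of the transition:

      // Inactive --start_incremental_cset_building()--> Active
      // (head/tail/size/bytes/prediction accumulators reset on entry)
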
2603 
2604 void G1CollectorPolicy::add_to_incremental_cset_info(HeapRegion* hr, size_t rs_length) {
2605   // This routine is used when:
2606   // * adding survivor regions to the incremental cset at the end of an
2607   //   evacuation pause,
2608   // * adding the current allocation region to the incremental cset
2609   //   when it is retired, and
2610   // * updating existing policy information for a region in the
2611   //   incremental cset via young list RSet sampling.
2612   // Therefore this routine may be called at a safepoint by the
2613   // VM thread, or in-between safepoints by mutator threads (when
2614   // retiring the current allocation region) or a concurrent


2803   }
2804 
2805   // We figure out the number of bytes available for future to-space.
2806   // For new regions without marking information, we must assume the
2807   // worst-case of complete survival.  If we have marking information for a
2808   // region, we can bound the amount of live data.  We can add a number of
2809   // such regions, as long as the sum of the live data bounds does not
2810   // exceed the available evacuation space.
2811   size_t max_live_bytes = _g1->free_regions() * HeapRegion::GrainBytes;
2812 
2813   size_t expansion_bytes =
2814     _g1->expansion_regions() * HeapRegion::GrainBytes;
2815 
2816   _collection_set_bytes_used_before = 0;
2817   _collection_set_size = 0;
2818 
2819   // Adjust for expansion and slop.
2820   max_live_bytes = max_live_bytes + expansion_bytes;
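  // [Reviewer note, not part of the patch] Worked example of this budget
  // with illustrative numbers (GrainBytes is configuration-dependent):
  //   100 free regions      * 1 MB GrainBytes = 100 MB
  //    20 expansion regions * 1 MB GrainBytes =  20 MB
  //   max_live_bytes after the adjustment     = 120 MB
  // i.e. regions may be admitted below as long as the sum of their
  // worst-case live-data bounds stays within 120 MB.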
2821 
2822   HeapRegion* hr;

2823   double young_start_time_sec = os::elapsedTime();
2824 
2825   if (G1PolicyVerbose > 0) {
2826     gclog_or_tty->print_cr("Adding %d young regions to the CSet",
2827                            _g1->young_list()->length());
2828   }
2829 
2830   _young_cset_length  = 0;
2831   _last_young_gc_full = full_young_gcs();
2832 
2833   if (_last_young_gc_full)
2834     ++_full_young_pause_num;
2835   else
2836     ++_partial_young_pause_num;
2837 
2838   // The young list is laid out with the survivor regions from the
2839   // previous pause appended to the RHS of the young list, i.e.
2840   //   [Newly Young Regions ++ Survivors from last pause].
2841 
2842   hr = _g1->young_list()->first_survivor_region();


2879       _inc_cset_size);
2880     gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
2881                            max_live_bytes/K);
2882   }
2883 
2884   assert(_inc_cset_size == _g1->young_list()->length(), "Invariant");
2885 
2886   double young_end_time_sec = os::elapsedTime();
2887   _recorded_young_cset_choice_time_ms =
2888     (young_end_time_sec - young_start_time_sec) * 1000.0;
2889 
2890   // We are doing young collections so reset this.
2891   non_young_start_time_sec = young_end_time_sec;
2892 
2893   // Note we can use either _collection_set_size or
2894   // _young_cset_length here
2895   if (_collection_set_size > 0 && _last_young_gc_full) {
2896     // don't bother adding more regions...
2897     goto choose_collection_set_end;
2898   }

2899 
2900   if (!full_young_gcs()) {
2901     bool should_continue = true;
2902     NumberSeq seq;
2903     double avg_prediction = 100000000000000000.0; // something very large
2904 
2905     do {
2906       hr = _collectionSetChooser->getNextMarkedRegion(time_remaining_ms,
2907                                                       avg_prediction);
2908       if (hr != NULL) {
2909         double predicted_time_ms = predict_region_elapsed_time_ms(hr, false);
2910         time_remaining_ms -= predicted_time_ms;
2911         predicted_pause_time_ms += predicted_time_ms;
2912         add_to_collection_set(hr);
2913         record_non_young_cset_region(hr);
2914         max_live_bytes -= MIN2(hr->max_live_bytes(), max_live_bytes);
2915         if (G1PolicyVerbose > 0) {
2916           gclog_or_tty->print_cr("    (" SIZE_FORMAT " KB left in heap.)",
2917                         max_live_bytes/K);
2918         }
2919         seq.add(predicted_time_ms);
2920         avg_prediction = seq.avg() + seq.sd();
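
[Reviewer note, not part of the patch] A hand-worked pass over the threshold update on line 2920: avg_prediction starts at an effectively unbounded 1e17, and after each admitted region becomes the mean plus one standard deviation of the per-region time predictions seen so far. With predictions of 10 ms and 14 ms recorded, seq.avg() = 12.0 and, assuming NumberSeq::sd() is the population standard deviation, seq.sd() = 2.0, so the next getNextMarkedRegion() call sees avg_prediction = 14.0.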