src/share/vm/gc/g1/g1CollectorPolicy.cpp

  64 static double cost_per_byte_ms_defaults[] = {
  65   0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
  66 };
  67 
  68 // these should be pretty consistent
  69 static double constant_other_time_ms_defaults[] = {
  70   5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
  71 };
  72 
  73 
  74 static double young_other_cost_per_region_ms_defaults[] = {
  75   0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
  76 };
  77 
  78 static double non_young_other_cost_per_region_ms_defaults[] = {
  79   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
  80 };
  81 
  82 G1CollectorPolicy::G1CollectorPolicy() :
  83   _predictor(G1ConfidencePercent / 100.0),
  84   _parallel_gc_threads(ParallelGCThreads),
  85 
  86   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  87   _stop_world_start(0.0),
  88 
  89   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  90   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  91 
  92   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  93   _prev_collection_pause_end_ms(0.0),
  94   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  95   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  96   _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
  97   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  98   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  99   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 100   _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 101   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 102   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 103   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 104   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 105   _non_young_other_cost_per_region_ms_seq(
 106                                          new TruncatedSeq(TruncatedSeqLength)),
 107 


 112 
 113   _recent_prev_end_times_for_all_gcs_sec(
 114                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 115 
 116   _recent_avg_pause_time_ratio(0.0),
 117   _rs_lengths_prediction(0),
 118   _max_survivor_regions(0),
 119 
 120   _eden_cset_region_length(0),
 121   _survivor_cset_region_length(0),
 122   _old_cset_region_length(0),
 123 
 124   _collection_set(NULL),
 125   _collection_set_bytes_used_before(0),
 126 
 127   // Incremental CSet attributes
 128   _inc_cset_build_state(Inactive),
 129   _inc_cset_head(NULL),
 130   _inc_cset_tail(NULL),
 131   _inc_cset_bytes_used_before(0),
 132   _inc_cset_max_finger(NULL),
 133   _inc_cset_recorded_rs_lengths(0),
 134   _inc_cset_recorded_rs_lengths_diffs(0),
 135   _inc_cset_predicted_elapsed_time_ms(0.0),
 136   _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
 137 
 138   // add here any more surv rate groups
 139   _recorded_survivor_regions(0),
 140   _recorded_survivor_head(NULL),
 141   _recorded_survivor_tail(NULL),
 142   _survivors_age_table(true),
 143 
 144   _gc_overhead_perc(0.0),
 145 
 146   _bytes_allocated_in_old_since_last_gc(0),
 147   _ihop_control(NULL),
 148   _initial_mark_to_mixed() {
 149 
 150   // SurvRateGroups below must be initialized after the predictor because they
 151   // indirectly use it through this object passed to their constructor.
 152   _short_lived_surv_rate_group =


 155     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
 156 
 157   // Set up the region size and associated fields. Given that the
 158   // policy is created before the heap, we have to set this up here,
 159   // so it's done as soon as possible.
 160 
 161   // It would have been natural to pass initial_heap_byte_size() and
 162   // max_heap_byte_size() to setup_heap_region_size() but those have
 163   // not been set up at this point since they should be aligned with
 164   // the region size. So, there is a circular dependency here. We base
 165   // the region size on the heap size, but the heap size should be
 166   // aligned with the region size. To get around this we use the
 167   // unaligned values for the heap.
 168   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
 169   HeapRegionRemSet::setup_remset_size();
 170 
 171   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
 172   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 173   clear_ratio_check_data();
 174 
 175   _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
 176 
 177   int index = MIN2(_parallel_gc_threads - 1, 7);
 178 
 179   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
 180   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
 181   _cost_scan_hcc_seq->add(0.0);
 182   _young_cards_per_entry_ratio_seq->add(
 183                                   young_cards_per_entry_ratio_defaults[index]);
 184   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
 185   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
 186   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
 187   _young_other_cost_per_region_ms_seq->add(
 188                                young_other_cost_per_region_ms_defaults[index]);
 189   _non_young_other_cost_per_region_ms_seq->add(
 190                            non_young_other_cost_per_region_ms_defaults[index]);
 191 
 192   // Below, we might need to calculate the pause time target based on
 193   // the pause interval. When we do so we are going to give G1 maximum
 194   // flexibility and allow it to do pauses when it needs to. So, we'll
 195   // arrange for the pause interval to be the pause time target + 1 to
 196   // ensure that a) the pause time target is maximized with respect to
 197   // the pause interval and b) we maintain the invariant that pause


 794     }
 795   }
 796 
 797   return ret;
 798 }
 799 #endif // PRODUCT
 800 
 801 void G1CollectorPolicy::record_full_collection_start() {
 802   _full_collection_start_sec = os::elapsedTime();
 803   // Release the future to-space so that it is available for compaction into.
 804   collector_state()->set_full_collection(true);
 805 }
 806 
 807 void G1CollectorPolicy::record_full_collection_end() {
 808   // Consider this like a collection pause for the purposes of allocation
 809   // since last pause.
 810   double end_sec = os::elapsedTime();
 811   double full_gc_time_sec = end_sec - _full_collection_start_sec;
 812   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 813 
 814   _trace_old_gen_time_data.record_full_collection(full_gc_time_ms);
 815 
 816   update_recent_gc_times(end_sec, full_gc_time_ms);
 817 
 818   collector_state()->set_full_collection(false);
 819 
 820   // "Nuke" the heuristics that control the young/mixed GC
 821   // transitions and make sure we start with young GCs after the Full GC.
 822   collector_state()->set_gcs_are_young(true);
 823   collector_state()->set_last_young_gc(false);
 824   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 825   collector_state()->set_during_initial_mark_pause(false);
 826   collector_state()->set_in_marking_window(false);
 827   collector_state()->set_in_marking_window_im(false);
 828 
 829   _short_lived_surv_rate_group->start_adding_regions();
 830   // also call this on any additional surv rate groups
 831 
 832   record_survivor_regions(0, NULL, NULL);
 833 
 834   _free_regions_at_end_of_collection = _g1->num_free_regions();
 835   // Reset survivors SurvRateGroup.
 836   _survivor_surv_rate_group->reset();
 837   update_young_list_max_and_target_length();
 838   update_rs_lengths_prediction();
 839   cset_chooser()->clear();
 840 
 841   _bytes_allocated_in_old_since_last_gc = 0;
 842 
 843   record_pause(FullGC, _full_collection_start_sec, end_sec);
 844 }
 845 
 846 void G1CollectorPolicy::record_stop_world_start() {
 847   _stop_world_start = os::elapsedTime();
 848 }
 849 
 850 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
 851   // We only need to do this here as the policy will only be applied
 852   // to the GC we're about to start, so there is no point in calculating this
 853   // every time we calculate / recalculate the target young length.
 854   update_survivors_policy();
 855 
 856   assert(_g1->used() == _g1->recalculate_used(),
 857          "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
 858          _g1->used(), _g1->recalculate_used());
 859 
 860   double s_w_t_ms = (start_time_sec - _stop_world_start) * 1000.0;
 861   _trace_young_gen_time_data.record_start_collection(s_w_t_ms);
 862   _stop_world_start = 0.0;
 863 
 864   phase_times()->record_cur_collection_start_sec(start_time_sec);
 865   _pending_cards = _g1->pending_card_num();
 866 
 867   _collection_set_bytes_used_before = 0;
 868   _bytes_copied_during_gc = 0;
 869 
 870   collector_state()->set_last_gc_was_young(false);
 871 
 872   // do that for any other surv rate groups
 873   _short_lived_surv_rate_group->stop_adding_regions();
 874   _survivors_age_table.clear();
 875 
 876   assert( verify_young_ages(), "region age verification" );
 877 }
 878 
 879 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 880                                                    mark_init_elapsed_time_ms) {
 881   collector_state()->set_during_marking(true);
 882   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 883   collector_state()->set_during_initial_mark_pause(false);


 895   _prev_collection_pause_end_ms += elapsed_time_ms;
 896 
 897   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 898 }
 899 
 900 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 901   _mark_cleanup_start_sec = os::elapsedTime();
 902 }
 903 
 904 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 905   bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
 906                                                               "skip last young-only gc");
 907   collector_state()->set_last_young_gc(should_continue_with_reclaim);
 908   // We skip the marking phase.
 909   if (!should_continue_with_reclaim) {
 910     abort_time_to_mixed_tracking();
 911   }
 912   collector_state()->set_in_marking_window(false);
 913 }
 914 
 915 void G1CollectorPolicy::record_concurrent_pause() {
 916   if (_stop_world_start > 0.0) {
 917     double yield_ms = (os::elapsedTime() - _stop_world_start) * 1000.0;
 918     _trace_young_gen_time_data.record_yield_time(yield_ms);
 919   }
 920 }
 921 
 922 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
 923   return phase_times()->average_time_ms(phase);
 924 }
 925 
 926 double G1CollectorPolicy::young_other_time_ms() const {
 927   return phase_times()->young_cset_choice_time_ms() +
 928          phase_times()->young_free_cset_time_ms();
 929 }
 930 
 931 double G1CollectorPolicy::non_young_other_time_ms() const {
 932   return phase_times()->non_young_cset_choice_time_ms() +
 933          phase_times()->non_young_free_cset_time_ms();
 934 
 935 }
 936 
 937 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
 938   return pause_time_ms -
 939          average_time_ms(G1GCPhaseTimes::UpdateRS) -
 940          average_time_ms(G1GCPhaseTimes::ScanRS) -
 941          average_time_ms(G1GCPhaseTimes::ObjCopy) -


 986   NOT_PRODUCT(_short_lived_surv_rate_group->print());
 987 
 988   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 989 
 990   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
 991   if (last_pause_included_initial_mark) {
 992     record_concurrent_mark_init_end(0.0);
 993   } else {
 994     maybe_start_marking();
 995   }
 996 
 997   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 998   if (app_time_ms < MIN_TIMER_GRANULARITY) {
 999     // This usually happens due to the timer not having the required
1000     // granularity. Some Linuxes are the usual culprits.
1001     // We'll just set it to something (arbitrarily) small.
1002     app_time_ms = 1.0;
1003   }
1004 
1005   if (update_stats) {
1006     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
1007     // We maintain the invariant that all objects allocated by mutator
1008     // threads will be allocated out of eden regions. So, we can use
1009     // the eden region number allocated since the previous GC to
1010     // calculate the application's allocate rate. The only exception
1011     // to that is humongous objects that are allocated separately. But
1012     // given that humongous object allocations do not really affect
1013     // either the pause's duration or when the next pause will take
1014     // place, we can safely ignore them here.
1015     uint regions_allocated = eden_cset_region_length();
1016     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
1017     _alloc_rate_ms_seq->add(alloc_rate_ms);
1018 
1019     double interval_ms =
1020       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1021     update_recent_gc_times(end_time_sec, pause_time_ms);
1022     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
1023     if (recent_avg_pause_time_ratio() < 0.0 ||
1024         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1025       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1026       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.


1576     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1577     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1578 
1579     clear_ratio_check_data();
1580   } else {
1581     // An expansion was not triggered. If we've started counting, increment
1582     // the number of checks we've made in the current window.  If we've
1583     // reached the end of the window without resizing, clear the counters to
1584     // start again the next time we see a ratio above the threshold.
1585     if (_ratio_over_threshold_count > 0) {
1586       _pauses_since_start++;
1587       if (_pauses_since_start > NumPrevPausesForHeuristics) {
1588         clear_ratio_check_data();
1589       }
1590     }
1591   }
1592 
1593   return expand_bytes;
1594 }
1595 
1596 void G1CollectorPolicy::print_tracing_info() const {
1597   _trace_young_gen_time_data.print();
1598   _trace_old_gen_time_data.print();
1599 }
1600 
1601 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1602 #ifndef PRODUCT
1603   _short_lived_surv_rate_group->print_surv_rate_summary();
1604   // add this call for any other surv rate groups
1605 #endif // PRODUCT
1606 }
1607 
1608 bool G1CollectorPolicy::is_young_list_full() const {
1609   uint young_list_length = _g1->young_list()->length();
1610   uint young_list_target_length = _young_list_target_length;
1611   return young_list_length >= young_list_target_length;
1612 }
1613 
1614 bool G1CollectorPolicy::can_expand_young_list() const {
1615   uint young_list_length = _g1->young_list()->length();
1616   uint young_list_max_length = _young_list_max_length;
1617   return young_list_length < young_list_max_length;
1618 }
1619 
1620 void G1CollectorPolicy::update_max_gc_locker_expansion() {


1787   assert(hr->is_old(), "the region should be old");
1788 
1789   assert(!hr->in_collection_set(), "should not already be in the CSet");
1790   _g1->register_old_region_with_cset(hr);
1791   hr->set_next_in_collection_set(_collection_set);
1792   _collection_set = hr;
1793   _collection_set_bytes_used_before += hr->used();
1794   size_t rs_length = hr->rem_set()->occupied();
1795   _recorded_rs_lengths += rs_length;
1796   _old_cset_region_length += 1;
1797 }
1798 
1799 // Initialize the per-collection-set information
1800 void G1CollectorPolicy::start_incremental_cset_building() {
1801   assert(_inc_cset_build_state == Inactive, "Precondition");
1802 
1803   _inc_cset_head = NULL;
1804   _inc_cset_tail = NULL;
1805   _inc_cset_bytes_used_before = 0;
1806 
1807   _inc_cset_max_finger = 0;
1808   _inc_cset_recorded_rs_lengths = 0;
1809   _inc_cset_recorded_rs_lengths_diffs = 0;
1810   _inc_cset_predicted_elapsed_time_ms = 0.0;
1811   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1812   _inc_cset_build_state = Active;
1813 }
1814 
1815 void G1CollectorPolicy::finalize_incremental_cset_building() {
1816   assert(_inc_cset_build_state == Active, "Precondition");
1817   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1818 
1819   // The two "main" fields, _inc_cset_recorded_rs_lengths and
1820   // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
1821   // that adds a new region to the CSet. Further updates by the
1822   // concurrent refinement thread that samples the young RSet lengths
1823   // are accumulated in the *_diffs fields. Here we add the diffs to
1824   // the "main" fields.
1825 
1826   if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
1827     _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;


1899 
1900   hr->set_recorded_rs_length(new_rs_length);
1901   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1902 }
1903 
1904 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1905   assert(hr->is_young(), "invariant");
1906   assert(hr->young_index_in_cset() > -1, "should have already been set");
1907   assert(_inc_cset_build_state == Active, "Precondition");
1908 
1909   // We need to clear and set the cached recorded/cached collection set
1910   // information in the heap region here (before the region gets added
1911   // to the collection set). An individual heap region's cached values
1912   // are calculated, aggregated with the policy collection set info,
1913   // and cached in the heap region here (initially) and (subsequently)
1914   // by the Young List sampling code.
1915 
1916   size_t rs_length = hr->rem_set()->occupied();
1917   add_to_incremental_cset_info(hr, rs_length);
1918 
1919   HeapWord* hr_end = hr->end();
1920   _inc_cset_max_finger = MAX2(_inc_cset_max_finger, hr_end);
1921 
1922   assert(!hr->in_collection_set(), "invariant");
1923   _g1->register_young_region_with_cset(hr);
1924   assert(hr->next_in_collection_set() == NULL, "invariant");
1925 }
1926 
1927 // Add the region at the RHS of the incremental cset
1928 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
1929   // We should only ever be appending survivors at the end of a pause
1930   assert(hr->is_survivor(), "Logic");
1931 
1932   // Do the 'common' stuff
1933   add_region_to_incremental_cset_common(hr);
1934 
1935   // Now add the region at the right hand side
1936   if (_inc_cset_tail == NULL) {
1937     assert(_inc_cset_head == NULL, "invariant");
1938     _inc_cset_head = hr;
1939   } else {
1940     _inc_cset_tail->set_next_in_collection_set(hr);
1941   }


2108 
2109 
2110 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
2111   double young_start_time_sec = os::elapsedTime();
2112 
2113   YoungList* young_list = _g1->young_list();
2114   finalize_incremental_cset_building();
2115 
2116   guarantee(target_pause_time_ms > 0.0,
2117             "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
2118   guarantee(_collection_set == NULL, "Precondition");
2119 
2120   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2121   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
2122 
2123   log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
2124                             _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
2125 
2126   collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
2127 
2128   if (collector_state()->last_gc_was_young()) {
2129     _trace_young_gen_time_data.increment_young_collection_count();
2130   } else {
2131     _trace_young_gen_time_data.increment_mixed_collection_count();
2132   }
2133 
2134   // The young list is laid out so that the survivor regions from the
2135   // previous pause are appended to the RHS of the young list, i.e.
2136   //   [Newly Young Regions ++ Survivors from last pause].
2137 
2138   uint survivor_region_length = young_list->survivor_length();
2139   uint eden_region_length = young_list->eden_length();
2140   init_cset_region_lengths(eden_region_length, survivor_region_length);
2141 
2142   HeapRegion* hr = young_list->first_survivor_region();
2143   while (hr != NULL) {
2144     assert(hr->is_survivor(), "badly formed young list");
2145     // There is a convention that all the young regions in the CSet
2146     // are tagged as "eden", so we do this for the survivors here. We
2147     // use the special set_eden_pre_gc() as it doesn't check that the
2148     // region is free (which is not the case here).
2149     hr->set_eden_pre_gc();
2150     hr = hr->get_next_young_region();
2151   }
2152 
2153   // Clear the fields that point to the survivor list - they are all young now.


2252 
2253     if (expensive_region_num > 0) {
2254       // We print the information once here at the end, predicated on
2255       // whether we added any apparently expensive regions or not, to
2256       // avoid generating output per region.
2257       log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min). "
2258                                 "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
2259                                 old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
2260     }
2261 
2262     cset_chooser()->verify();
2263   }
2264 
2265   stop_incremental_cset_building();
2266 
2267   log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
2268                             old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
2269 
2270   double non_young_end_time_sec = os::elapsedTime();
2271   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2272 }
2273 
2274 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
2275   if(TraceYoungGenTime) {
2276     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2277   }
2278 }
2279 
2280 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
2281   if(TraceYoungGenTime) {
2282     _all_yield_times_ms.add(yield_time_ms);
2283   }
2284 }
2285 
2286 void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2287   if(TraceYoungGenTime) {
2288     _total.add(pause_time_ms);
2289     _other.add(pause_time_ms - phase_times->accounted_time_ms());
2290     _root_region_scan_wait.add(phase_times->root_region_scan_wait_time_ms());
2291     _parallel.add(phase_times->cur_collection_par_time_ms());
2292     _ext_root_scan.add(phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan));
2293     _satb_filtering.add(phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering));
2294     _update_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS));
2295     _scan_rs.add(phase_times->average_time_ms(G1GCPhaseTimes::ScanRS));
2296     _obj_copy.add(phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy));
2297     _termination.add(phase_times->average_time_ms(G1GCPhaseTimes::Termination));
2298 
2299     double parallel_known_time = phase_times->average_time_ms(G1GCPhaseTimes::ExtRootScan) +
2300       phase_times->average_time_ms(G1GCPhaseTimes::SATBFiltering) +
2301       phase_times->average_time_ms(G1GCPhaseTimes::UpdateRS) +
2302       phase_times->average_time_ms(G1GCPhaseTimes::ScanRS) +
2303       phase_times->average_time_ms(G1GCPhaseTimes::ObjCopy) +
2304       phase_times->average_time_ms(G1GCPhaseTimes::Termination);
2305 
2306     double parallel_other_time = phase_times->cur_collection_par_time_ms() - parallel_known_time;
2307     _parallel_other.add(parallel_other_time);
2308     _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2309   }
2310 }
2311 
2312 void TraceYoungGenTimeData::increment_young_collection_count() {
2313   if(TraceYoungGenTime) {
2314     ++_young_pause_num;
2315   }
2316 }
2317 
2318 void TraceYoungGenTimeData::increment_mixed_collection_count() {
2319   if(TraceYoungGenTime) {
2320     ++_mixed_pause_num;
2321   }
2322 }
2323 
2324 void TraceYoungGenTimeData::print_summary(const char* str,
2325                                           const NumberSeq* seq) const {
2326   double sum = seq->sum();
2327   tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2328                 str, sum / 1000.0, seq->avg());
2329 }
2330 
2331 void TraceYoungGenTimeData::print_summary_sd(const char* str,
2332                                              const NumberSeq* seq) const {
2333   print_summary(str, seq);
2334   tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2335                 "(num", seq->num(), seq->sd(), seq->maximum());
2336 }
2337 
2338 void TraceYoungGenTimeData::print() const {
2339   if (!TraceYoungGenTime) {
2340     return;
2341   }
2342 
2343   tty->print_cr("ALL PAUSES");
2344   print_summary_sd("   Total", &_total);
2345   tty->cr();
2346   tty->cr();
2347   tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
2348   tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
2349   tty->cr();
2350 
2351   tty->print_cr("EVACUATION PAUSES");
2352 
2353   if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2354     tty->print_cr("none");
2355   } else {
2356     print_summary_sd("   Evacuation Pauses", &_total);
2357     print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
2358     print_summary("      Parallel Time", &_parallel);
2359     print_summary("         Ext Root Scanning", &_ext_root_scan);
2360     print_summary("         SATB Filtering", &_satb_filtering);
2361     print_summary("         Update RS", &_update_rs);
2362     print_summary("         Scan RS", &_scan_rs);
2363     print_summary("         Object Copy", &_obj_copy);
2364     print_summary("         Termination", &_termination);
2365     print_summary("         Parallel Other", &_parallel_other);
2366     print_summary("      Clear CT", &_clear_ct);
2367     print_summary("      Other", &_other);
2368   }
2369   tty->cr();
2370 
2371   tty->print_cr("MISC");
2372   print_summary_sd("   Stop World", &_all_stop_world_times_ms);
2373   print_summary_sd("   Yields", &_all_yield_times_ms);
2374 }
2375 
2376 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2377   if (TraceOldGenTime) {
2378     _all_full_gc_times.add(full_gc_time_ms);
2379   }
2380 }
2381 
2382 void TraceOldGenTimeData::print() const {
2383   if (!TraceOldGenTime) {
2384     return;
2385   }
2386 
2387   if (_all_full_gc_times.num() > 0) {
2388     tty->print("\n%4d full_gcs: total time = %8.2f s",
2389       _all_full_gc_times.num(),
2390       _all_full_gc_times.sum() / 1000.0);
2391     tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2392     tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
2393       _all_full_gc_times.sd(),
2394       _all_full_gc_times.maximum());
2395   }
2396 }


  64 static double cost_per_byte_ms_defaults[] = {
  65   0.00006, 0.00003, 0.00003, 0.000015, 0.000015, 0.00001, 0.00001, 0.000009
  66 };
  67 
  68 // these should be pretty consistent
  69 static double constant_other_time_ms_defaults[] = {
  70   5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0, 5.0
  71 };
  72 
  73 
  74 static double young_other_cost_per_region_ms_defaults[] = {
  75   0.3, 0.2, 0.2, 0.15, 0.15, 0.12, 0.12, 0.1
  76 };
  77 
  78 static double non_young_other_cost_per_region_ms_defaults[] = {
  79   1.0, 0.7, 0.7, 0.5, 0.5, 0.42, 0.42, 0.30
  80 };
  81 
  82 G1CollectorPolicy::G1CollectorPolicy() :
  83   _predictor(G1ConfidencePercent / 100.0),

  84 
  85   _recent_gc_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),

  86 
  87   _concurrent_mark_remark_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  88   _concurrent_mark_cleanup_times_ms(new TruncatedSeq(NumPrevPausesForHeuristics)),
  89 
  90   _alloc_rate_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  91   _prev_collection_pause_end_ms(0.0),
  92   _rs_length_diff_seq(new TruncatedSeq(TruncatedSeqLength)),
  93   _cost_per_card_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  94   _cost_scan_hcc_seq(new TruncatedSeq(TruncatedSeqLength)),
  95   _young_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  96   _mixed_cards_per_entry_ratio_seq(new TruncatedSeq(TruncatedSeqLength)),
  97   _cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  98   _mixed_cost_per_entry_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
  99   _cost_per_byte_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 100   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 101   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 102   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 103   _non_young_other_cost_per_region_ms_seq(
 104                                          new TruncatedSeq(TruncatedSeqLength)),
 105 


 110 
 111   _recent_prev_end_times_for_all_gcs_sec(
 112                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 113 
 114   _recent_avg_pause_time_ratio(0.0),
 115   _rs_lengths_prediction(0),
 116   _max_survivor_regions(0),
 117 
 118   _eden_cset_region_length(0),
 119   _survivor_cset_region_length(0),
 120   _old_cset_region_length(0),
 121 
 122   _collection_set(NULL),
 123   _collection_set_bytes_used_before(0),
 124 
 125   // Incremental CSet attributes
 126   _inc_cset_build_state(Inactive),
 127   _inc_cset_head(NULL),
 128   _inc_cset_tail(NULL),
 129   _inc_cset_bytes_used_before(0),

 130   _inc_cset_recorded_rs_lengths(0),
 131   _inc_cset_recorded_rs_lengths_diffs(0),
 132   _inc_cset_predicted_elapsed_time_ms(0.0),
 133   _inc_cset_predicted_elapsed_time_ms_diffs(0.0),
 134 
 135   // add here any more surv rate groups
 136   _recorded_survivor_regions(0),
 137   _recorded_survivor_head(NULL),
 138   _recorded_survivor_tail(NULL),
 139   _survivors_age_table(true),
 140 
 141   _gc_overhead_perc(0.0),
 142 
 143   _bytes_allocated_in_old_since_last_gc(0),
 144   _ihop_control(NULL),
 145   _initial_mark_to_mixed() {
 146 
 147   // SurvRateGroups below must be initialized after the predictor because they
 148   // indirectly use it through this object passed to their constructor.
 149   _short_lived_surv_rate_group =


 152     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
 153 
 154   // Set up the region size and associated fields. Given that the
 155   // policy is created before the heap, we have to set this up here,
 156   // so it's done as soon as possible.
 157 
 158   // It would have been natural to pass initial_heap_byte_size() and
 159   // max_heap_byte_size() to setup_heap_region_size() but those have
 160   // not been set up at this point since they should be aligned with
 161   // the region size. So, there is a circular dependency here. We base
 162   // the region size on the heap size, but the heap size should be
 163   // aligned with the region size. To get around this we use the
 164   // unaligned values for the heap.
 165   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
 166   HeapRegionRemSet::setup_remset_size();
 167 
 168   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
 169   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 170   clear_ratio_check_data();
 171 
 172   _phase_times = new G1GCPhaseTimes(ParallelGCThreads);
 173 
 174   int index = MIN2(ParallelGCThreads - 1, 7u);
 175 
 176   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
 177   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
 178   _cost_scan_hcc_seq->add(0.0);
 179   _young_cards_per_entry_ratio_seq->add(
 180                                   young_cards_per_entry_ratio_defaults[index]);
 181   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
 182   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
 183   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
 184   _young_other_cost_per_region_ms_seq->add(
 185                                young_other_cost_per_region_ms_defaults[index]);
 186   _non_young_other_cost_per_region_ms_seq->add(
 187                            non_young_other_cost_per_region_ms_defaults[index]);
 188 
 189   // Below, we might need to calculate the pause time target based on
 190   // the pause interval. When we do so we are going to give G1 maximum
 191   // flexibility and allow it to do pauses when it needs to. So, we'll
 192   // arrange for the pause interval to be the pause time target + 1 to
 193   // ensure that a) the pause time target is maximized with respect to
 194   // the pause interval and b) we maintain the invariant that pause


 791     }
 792   }
 793 
 794   return ret;
 795 }
 796 #endif // PRODUCT
 797 
 798 void G1CollectorPolicy::record_full_collection_start() {
 799   _full_collection_start_sec = os::elapsedTime();
 800   // Release the future to-space so that it is available for compaction into.
 801   collector_state()->set_full_collection(true);
 802 }
 803 
 804 void G1CollectorPolicy::record_full_collection_end() {
 805   // Consider this like a collection pause for the purposes of allocation
 806   // since last pause.
 807   double end_sec = os::elapsedTime();
 808   double full_gc_time_sec = end_sec - _full_collection_start_sec;
 809   double full_gc_time_ms = full_gc_time_sec * 1000.0;
 810 


 811   update_recent_gc_times(end_sec, full_gc_time_ms);
 812 
 813   collector_state()->set_full_collection(false);
 814 
 815   // "Nuke" the heuristics that control the young/mixed GC
 816   // transitions and make sure we start with young GCs after the Full GC.
 817   collector_state()->set_gcs_are_young(true);
 818   collector_state()->set_last_young_gc(false);
 819   collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
 820   collector_state()->set_during_initial_mark_pause(false);
 821   collector_state()->set_in_marking_window(false);
 822   collector_state()->set_in_marking_window_im(false);
 823 
 824   _short_lived_surv_rate_group->start_adding_regions();
 825   // also call this on any additional surv rate groups
 826 
 827   record_survivor_regions(0, NULL, NULL);
 828 
 829   _free_regions_at_end_of_collection = _g1->num_free_regions();
 830   // Reset survivors SurvRateGroup.
 831   _survivor_surv_rate_group->reset();
 832   update_young_list_max_and_target_length();
 833   update_rs_lengths_prediction();
 834   cset_chooser()->clear();
 835 
 836   _bytes_allocated_in_old_since_last_gc = 0;
 837 
 838   record_pause(FullGC, _full_collection_start_sec, end_sec);
 839 }
 840 




 841 void G1CollectorPolicy::record_collection_pause_start(double start_time_sec) {
 842   // We only need to do this here as the policy will only be applied
 843   // to the GC we're about to start, so there is no point in calculating this
 844   // every time we calculate / recalculate the target young length.
 845   update_survivors_policy();
 846 
 847   assert(_g1->used() == _g1->recalculate_used(),
 848          "sanity, used: " SIZE_FORMAT " recalculate_used: " SIZE_FORMAT,
 849          _g1->used(), _g1->recalculate_used());
 850 




 851   phase_times()->record_cur_collection_start_sec(start_time_sec);
 852   _pending_cards = _g1->pending_card_num();
 853 
 854   _collection_set_bytes_used_before = 0;
 855   _bytes_copied_during_gc = 0;
 856 
 857   collector_state()->set_last_gc_was_young(false);
 858 
 859   // do that for any other surv rate groups
 860   _short_lived_surv_rate_group->stop_adding_regions();
 861   _survivors_age_table.clear();
 862 
 863   assert( verify_young_ages(), "region age verification" );
 864 }
 865 
 866 void G1CollectorPolicy::record_concurrent_mark_init_end(double
 867                                                    mark_init_elapsed_time_ms) {
 868   collector_state()->set_during_marking(true);
 869   assert(!collector_state()->initiate_conc_mark_if_possible(), "we should have cleared it by now");
 870   collector_state()->set_during_initial_mark_pause(false);


 882   _prev_collection_pause_end_ms += elapsed_time_ms;
 883 
 884   record_pause(Remark, _mark_remark_start_sec, end_time_sec);
 885 }
 886 
 887 void G1CollectorPolicy::record_concurrent_mark_cleanup_start() {
 888   _mark_cleanup_start_sec = os::elapsedTime();
 889 }
 890 
 891 void G1CollectorPolicy::record_concurrent_mark_cleanup_completed() {
 892   bool should_continue_with_reclaim = next_gc_should_be_mixed("request last young-only gc",
 893                                                               "skip last young-only gc");
 894   collector_state()->set_last_young_gc(should_continue_with_reclaim);
 895   // We skip the marking phase.
 896   if (!should_continue_with_reclaim) {
 897     abort_time_to_mixed_tracking();
 898   }
 899   collector_state()->set_in_marking_window(false);
 900 }
 901 







 902 double G1CollectorPolicy::average_time_ms(G1GCPhaseTimes::GCParPhases phase) const {
 903   return phase_times()->average_time_ms(phase);
 904 }
 905 
 906 double G1CollectorPolicy::young_other_time_ms() const {
 907   return phase_times()->young_cset_choice_time_ms() +
 908          phase_times()->young_free_cset_time_ms();
 909 }
 910 
 911 double G1CollectorPolicy::non_young_other_time_ms() const {
 912   return phase_times()->non_young_cset_choice_time_ms() +
 913          phase_times()->non_young_free_cset_time_ms();
 914 
 915 }
 916 
 917 double G1CollectorPolicy::other_time_ms(double pause_time_ms) const {
 918   return pause_time_ms -
 919          average_time_ms(G1GCPhaseTimes::UpdateRS) -
 920          average_time_ms(G1GCPhaseTimes::ScanRS) -
 921          average_time_ms(G1GCPhaseTimes::ObjCopy) -


 966   NOT_PRODUCT(_short_lived_surv_rate_group->print());
 967 
 968   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 969 
 970   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
 971   if (last_pause_included_initial_mark) {
 972     record_concurrent_mark_init_end(0.0);
 973   } else {
 974     maybe_start_marking();
 975   }
 976 
 977   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 978   if (app_time_ms < MIN_TIMER_GRANULARITY) {
 979     // This usually happens due to the timer not having the required
 980     // granularity. Some Linuxes are the usual culprits.
 981     // We'll just set it to something (arbitrarily) small.
 982     app_time_ms = 1.0;
 983   }
 984 
 985   if (update_stats) {

 986     // We maintain the invariant that all objects allocated by mutator
 987     // threads will be allocated out of eden regions. So, we can use
 988     // the eden region number allocated since the previous GC to
 989     // calculate the application's allocate rate. The only exception
 990     // to that is humongous objects that are allocated separately. But
 991     // given that humongous object allocations do not really affect
 992     // either the pause's duration or when the next pause will take
 993     // place, we can safely ignore them here.
 994     uint regions_allocated = eden_cset_region_length();
 995     double alloc_rate_ms = (double) regions_allocated / app_time_ms;
 996     _alloc_rate_ms_seq->add(alloc_rate_ms);
 997 
 998     double interval_ms =
 999       (end_time_sec - _recent_prev_end_times_for_all_gcs_sec->oldest()) * 1000.0;
1000     update_recent_gc_times(end_time_sec, pause_time_ms);
1001     _recent_avg_pause_time_ratio = _recent_gc_times_ms->sum()/interval_ms;
1002     if (recent_avg_pause_time_ratio() < 0.0 ||
1003         (recent_avg_pause_time_ratio() - 1.0 > 0.0)) {
1004       // Clip ratio between 0.0 and 1.0, and continue. This will be fixed in
1005       // CR 6902692 by redoing the manner in which the ratio is incrementally computed.


1555     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1556     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1557 
1558     clear_ratio_check_data();
1559   } else {
1560     // An expansion was not triggered. If we've started counting, increment
1561     // the number of checks we've made in the current window.  If we've
1562     // reached the end of the window without resizing, clear the counters to
1563     // start again the next time we see a ratio above the threshold.
1564     if (_ratio_over_threshold_count > 0) {
1565       _pauses_since_start++;
1566       if (_pauses_since_start > NumPrevPausesForHeuristics) {
1567         clear_ratio_check_data();
1568       }
1569     }
1570   }
1571 
1572   return expand_bytes;
1573 }
1574 





1575 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1576 #ifndef PRODUCT
1577   _short_lived_surv_rate_group->print_surv_rate_summary();
1578   // add this call for any other surv rate groups
1579 #endif // PRODUCT
1580 }
1581 
1582 bool G1CollectorPolicy::is_young_list_full() const {
1583   uint young_list_length = _g1->young_list()->length();
1584   uint young_list_target_length = _young_list_target_length;
1585   return young_list_length >= young_list_target_length;
1586 }
1587 
1588 bool G1CollectorPolicy::can_expand_young_list() const {
1589   uint young_list_length = _g1->young_list()->length();
1590   uint young_list_max_length = _young_list_max_length;
1591   return young_list_length < young_list_max_length;
1592 }
1593 
1594 void G1CollectorPolicy::update_max_gc_locker_expansion() {


1761   assert(hr->is_old(), "the region should be old");
1762 
1763   assert(!hr->in_collection_set(), "should not already be in the CSet");
1764   _g1->register_old_region_with_cset(hr);
1765   hr->set_next_in_collection_set(_collection_set);
1766   _collection_set = hr;
1767   _collection_set_bytes_used_before += hr->used();
1768   size_t rs_length = hr->rem_set()->occupied();
1769   _recorded_rs_lengths += rs_length;
1770   _old_cset_region_length += 1;
1771 }
1772 
1773 // Initialize the per-collection-set information
1774 void G1CollectorPolicy::start_incremental_cset_building() {
1775   assert(_inc_cset_build_state == Inactive, "Precondition");
1776 
1777   _inc_cset_head = NULL;
1778   _inc_cset_tail = NULL;
1779   _inc_cset_bytes_used_before = 0;
1780 

1781   _inc_cset_recorded_rs_lengths = 0;
1782   _inc_cset_recorded_rs_lengths_diffs = 0;
1783   _inc_cset_predicted_elapsed_time_ms = 0.0;
1784   _inc_cset_predicted_elapsed_time_ms_diffs = 0.0;
1785   _inc_cset_build_state = Active;
1786 }
1787 
1788 void G1CollectorPolicy::finalize_incremental_cset_building() {
1789   assert(_inc_cset_build_state == Active, "Precondition");
1790   assert(SafepointSynchronize::is_at_safepoint(), "should be at a safepoint");
1791 
1792   // The two "main" fields, _inc_cset_recorded_rs_lengths and
1793   // _inc_cset_predicted_elapsed_time_ms, are updated by the thread
1794   // that adds a new region to the CSet. Further updates by the
1795   // concurrent refinement thread that samples the young RSet lengths
1796   // are accumulated in the *_diffs fields. Here we add the diffs to
1797   // the "main" fields.
1798 
1799   if (_inc_cset_recorded_rs_lengths_diffs >= 0) {
1800     _inc_cset_recorded_rs_lengths += _inc_cset_recorded_rs_lengths_diffs;


1872 
1873   hr->set_recorded_rs_length(new_rs_length);
1874   hr->set_predicted_elapsed_time_ms(new_region_elapsed_time_ms);
1875 }
1876 
1877 void G1CollectorPolicy::add_region_to_incremental_cset_common(HeapRegion* hr) {
1878   assert(hr->is_young(), "invariant");
1879   assert(hr->young_index_in_cset() > -1, "should have already been set");
1880   assert(_inc_cset_build_state == Active, "Precondition");
1881 
1882   // We need to clear and set the cached recorded/cached collection set
1883   // information in the heap region here (before the region gets added
1884   // to the collection set). An individual heap region's cached values
1885   // are calculated, aggregated with the policy collection set info,
1886   // and cached in the heap region here (initially) and (subsequently)
1887   // by the Young List sampling code.
1888 
1889   size_t rs_length = hr->rem_set()->occupied();
1890   add_to_incremental_cset_info(hr, rs_length);
1891 



1892   assert(!hr->in_collection_set(), "invariant");
1893   _g1->register_young_region_with_cset(hr);
1894   assert(hr->next_in_collection_set() == NULL, "invariant");
1895 }
1896 
1897 // Add the region at the RHS of the incremental cset
1898 void G1CollectorPolicy::add_region_to_incremental_cset_rhs(HeapRegion* hr) {
1899   // We should only ever be appending survivors at the end of a pause
1900   assert(hr->is_survivor(), "Logic");
1901 
1902   // Do the 'common' stuff
1903   add_region_to_incremental_cset_common(hr);
1904 
1905   // Now add the region at the right hand side
1906   if (_inc_cset_tail == NULL) {
1907     assert(_inc_cset_head == NULL, "invariant");
1908     _inc_cset_head = hr;
1909   } else {
1910     _inc_cset_tail->set_next_in_collection_set(hr);
1911   }


2078 
2079 
2080 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
2081   double young_start_time_sec = os::elapsedTime();
2082 
2083   YoungList* young_list = _g1->young_list();
2084   finalize_incremental_cset_building();
2085 
2086   guarantee(target_pause_time_ms > 0.0,
2087             "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
2088   guarantee(_collection_set == NULL, "Precondition");
2089 
2090   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2091   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
2092 
2093   log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
2094                             _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
2095 
2096   collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
2097 






2098   // The young list is laid out so that the survivor regions from the
2099   // previous pause are appended to the RHS of the young list, i.e.
2100   //   [Newly Young Regions ++ Survivors from last pause].
2101 
2102   uint survivor_region_length = young_list->survivor_length();
2103   uint eden_region_length = young_list->eden_length();
2104   init_cset_region_lengths(eden_region_length, survivor_region_length);
2105 
2106   HeapRegion* hr = young_list->first_survivor_region();
2107   while (hr != NULL) {
2108     assert(hr->is_survivor(), "badly formed young list");
2109     // There is a convention that all the young regions in the CSet
2110     // are tagged as "eden", so we do this for the survivors here. We
2111     // use the special set_eden_pre_gc() as it doesn't check that the
2112     // region is free (which is not the case here).
2113     hr->set_eden_pre_gc();
2114     hr = hr->get_next_young_region();
2115   }
2116 
2117   // Clear the fields that point to the survivor list - they are all young now.


2216 
2217     if (expensive_region_num > 0) {
2218       // We print the information once here at the end, predicated on
2219       // whether we added any apparently expensive regions or not, to
2220       // avoid generating output per region.
2221       log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min). "
2222                                 "old: %u regions, expensive: %u regions, min: %u regions, remaining time: %1.2fms",
2223                                 old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
2224     }
2225 
2226     cset_chooser()->verify();
2227   }
2228 
2229   stop_incremental_cset_building();
2230 
2231   log_debug(gc, ergo, cset)("Finish choosing CSet. old: %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
2232                             old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
2233 
2234   double non_young_end_time_sec = os::elapsedTime();
2235   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);

2236 }