src/share/vm/gc/g1/g1CollectorPolicy.cpp

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/concurrentG1Refine.hpp"
  27 #include "gc/g1/concurrentMark.hpp"
  28 #include "gc/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorPolicy.hpp"
  31 #include "gc/g1/g1ErgoVerbose.hpp"
  32 #include "gc/g1/g1GCPhaseTimes.hpp"
  33 #include "gc/g1/g1Log.hpp"
  34 #include "gc/g1/heapRegion.inline.hpp"
  35 #include "gc/g1/heapRegionRemSet.hpp"
  36 #include "gc/shared/gcPolicyCounters.hpp"
  37 #include "runtime/arguments.hpp"
  38 #include "runtime/java.hpp"
  39 #include "runtime/mutexLocker.hpp"
  40 #include "utilities/debug.hpp"
  41 
  42 // Different defaults for different numbers of GC threads
  43 // They were chosen by running GCOld and SPECjbb on debris with different
  44 //   numbers of GC threads and choosing them based on the results
  45 
  46 // all the same
  47 static double rs_length_diff_defaults[] = {
  48   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
  49 };
  50 
  51 static double cost_per_card_ms_defaults[] = {
  52   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
  53 };
  54 
  55 // all the same
  56 static double young_cards_per_entry_ratio_defaults[] = {


 102   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 103   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 104   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 105   _non_young_other_cost_per_region_ms_seq(
 106                                          new TruncatedSeq(TruncatedSeqLength)),
 107 
 108   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 109   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 110 
 111   _pause_time_target_ms((double) MaxGCPauseMillis),
 112 
 113   _recent_prev_end_times_for_all_gcs_sec(
 114                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 115 
 116   _recent_avg_pause_time_ratio(0.0),
 117   _rs_lengths_prediction(0),
 118   _max_survivor_regions(0),
 119 
 120   _eden_used_bytes_before_gc(0),
 121   _survivor_used_bytes_before_gc(0),
 122   _heap_used_bytes_before_gc(0),
 123   _metaspace_used_bytes_before_gc(0),
 124   _eden_capacity_bytes_before_gc(0),
 125   _heap_capacity_bytes_before_gc(0),
 126 
 127   _eden_cset_region_length(0),
 128   _survivor_cset_region_length(0),
 129   _old_cset_region_length(0),
 130 
 131   _collection_set(NULL),
 132   _collection_set_bytes_used_before(0),
 133 
 134   // Incremental CSet attributes
 135   _inc_cset_build_state(Inactive),
 136   _inc_cset_head(NULL),
 137   _inc_cset_tail(NULL),
 138   _inc_cset_bytes_used_before(0),
 139   _inc_cset_max_finger(NULL),
 140   _inc_cset_recorded_rs_lengths(0),
 141   _inc_cset_recorded_rs_lengths_diffs(0),


 154   // indirectly use it through this object passed to their constructor.
 155   _short_lived_surv_rate_group =
 156     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
 157   _survivor_surv_rate_group =
 158     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
 159 
 160   // Set up the region size and associated fields. Given that the
 161   // policy is created before the heap, we have to set this up here,
 162   // so it's done as soon as possible.
 163 
 164   // It would have been natural to pass initial_heap_byte_size() and
 165   // max_heap_byte_size() to setup_heap_region_size() but those have
 166   // not been set up at this point since they should be aligned with
 167   // the region size. So, there is a circular dependency here. We base
 168   // the region size on the heap size, but the heap size should be
 169   // aligned with the region size. To get around this we use the
 170   // unaligned values for the heap.
 171   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
 172   HeapRegionRemSet::setup_remset_size();
 173 
 174   G1ErgoVerbose::initialize();
 175   if (PrintAdaptiveSizePolicy) {
 176     // Currently, we only use a single switch for all the heuristics.
 177     G1ErgoVerbose::set_enabled(true);
 178     // Given that we don't currently have a verbosity level
 179     // parameter, we'll hardcode this to high. This can be easily
 180     // changed in the future.
 181     G1ErgoVerbose::set_level(ErgoHigh);
 182   } else {
 183     G1ErgoVerbose::set_enabled(false);
 184   }
 185 
 186   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
 187   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 188 
 189   _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
 190 
 191   int index = MIN2(_parallel_gc_threads - 1, 7);
 192 
 193   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
 194   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
 195   _cost_scan_hcc_seq->add(0.0);
 196   _young_cards_per_entry_ratio_seq->add(
 197                                   young_cards_per_entry_ratio_defaults[index]);
 198   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
 199   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
 200   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
 201   _young_other_cost_per_region_ms_seq->add(
 202                                young_other_cost_per_region_ms_defaults[index]);
 203   _non_young_other_cost_per_region_ms_seq->add(
 204                            non_young_other_cost_per_region_ms_defaults[index]);
 205 
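// A minimal worked example of the defaults lookup just performed, assuming
// 13 parallel GC threads (any count >= 8 clamps to the last slot):
//
//   int index = MIN2(13 - 1, 7);                    // index == 7
//   double d  = cost_per_card_ms_defaults[index];   // 0.0015, the 8-thread value
//
// so every thread count beyond eight shares the 8-thread defaults.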


 745   HeapRegion* head = _g1->young_list()->first_region();
 746   return
 747     verify_young_ages(head, _short_lived_surv_rate_group);
 748   // also call verify_young_ages on any additional surv rate groups
 749 }
 750 
 751 bool
 752 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
  753                                      SurvRateGroup* surv_rate_group) {
  754   guarantee(surv_rate_group != NULL, "pre-condition");
 755 
 756   const char* name = surv_rate_group->name();
 757   bool ret = true;
 758   int prev_age = -1;
 759 
 760   for (HeapRegion* curr = head;
 761        curr != NULL;
 762        curr = curr->get_next_young_region()) {
 763     SurvRateGroup* group = curr->surv_rate_group();
 764     if (group == NULL && !curr->is_survivor()) {
 765       gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
 766       ret = false;
 767     }
 768 
 769     if (surv_rate_group == group) {
 770       int age = curr->age_in_surv_rate_group();
 771 
 772       if (age < 0) {
 773         gclog_or_tty->print_cr("## %s: encountered negative age", name);
 774         ret = false;
 775       }
 776 
 777       if (age <= prev_age) {
 778         gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
 779                                "(%d, %d)", name, age, prev_age);
 780         ret = false;
 781       }
 782       prev_age = age;
 783     }
 784   }
 785 
 786   return ret;
 787 }
 788 #endif // PRODUCT
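// A small sketch of what verify_young_ages() accepts and rejects, assuming a
// young list whose regions report these ages in order:
//
//   ages 1, 2, 4  -> strictly increasing, ret stays true
//   ages 2, 2, 3  -> 2 <= 2 trips the "not strictly increasing" message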
 789 
 790 void G1CollectorPolicy::record_full_collection_start() {
 791   _full_collection_start_sec = os::elapsedTime();
 792   record_heap_size_info_at_start(true /* full */);
 793   // Release the future to-space so that it is available for compaction into.
 794   collector_state()->set_full_collection(true);
 795 }
 796 
 797 void G1CollectorPolicy::record_full_collection_end() {
 798   // Consider this like a collection pause for the purposes of allocation
 799   // since last pause.


 925          average_time_ms(G1GCPhaseTimes::ObjCopy) -
 926          average_time_ms(G1GCPhaseTimes::Termination);
 927 }
 928 
 929 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
 930   return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
 931 }
 932 
 933 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 934   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
 935     return false;
 936   }
 937 
 938   size_t marking_initiating_used_threshold =
 939     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 940   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 941   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 942 
 943   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
 944     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
 945       ergo_verbose5(ErgoConcCycles,
 946         "request concurrent cycle initiation",
 947         ergo_format_reason("occupancy higher than threshold")
 948         ergo_format_byte("occupancy")
 949         ergo_format_byte("allocation request")
 950         ergo_format_byte_perc("threshold")
 951         ergo_format_str("source"),
 952         cur_used_bytes,
 953         alloc_byte_size,
 954         marking_initiating_used_threshold,
 955         (double) InitiatingHeapOccupancyPercent,
 956         source);
 957       return true;
 958     } else {
 959       ergo_verbose5(ErgoConcCycles,
 960         "do not request concurrent cycle initiation",
 961         ergo_format_reason("still doing mixed collections")
 962         ergo_format_byte("occupancy")
 963         ergo_format_byte("allocation request")
 964         ergo_format_byte_perc("threshold")
 965         ergo_format_str("source"),
 966         cur_used_bytes,
 967         alloc_byte_size,
 968         marking_initiating_used_threshold,
 969         (double) InitiatingHeapOccupancyPercent,
 970         source);
 971     }
 972   }
 973 
 974   return false;
 975 }
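// A worked example of the initiation test above, assuming a 1024 MB committed
// heap and the default InitiatingHeapOccupancyPercent of 45:
//
//   marking_initiating_used_threshold = (1024 * M / 100) * 45;   // ~460 MB
//
// A pause whose non-young occupancy plus allocation request exceeds ~460 MB
// asks for a concurrent cycle, unless mixed collections are still in progress.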
 976 
 977 // Timer measurements below this granularity are considered to be zero
 978 #define MIN_TIMER_GRANULARITY 0.0000001
 979 
 980 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
 981   double end_time_sec = os::elapsedTime();
 982   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
 983          "otherwise, the subtraction below does not make sense");
 984   size_t rs_size =
 985             _cur_collection_pause_used_regions_at_start - cset_region_length();
 986   size_t cur_used_bytes = _g1->used();
 987   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
 988   bool last_pause_included_initial_mark = false;
 989   bool update_stats = !_g1->evacuation_failed();
 990 
 991 #ifndef PRODUCT
 992   if (G1YoungSurvRateVerbose) {
 993     gclog_or_tty->cr();
 994     _short_lived_surv_rate_group->print();
 995     // do that for any other surv rate groups too
 996   }
 997 #endif // PRODUCT
 998 
 999   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
1000   if (last_pause_included_initial_mark) {
1001     record_concurrent_mark_init_end(0.0);
1002   } else if (need_to_start_conc_mark("end of GC")) {
1003     // Note: this might have already been set, if during the last
1004     // pause we decided to start a cycle but at the beginning of
1005     // this pause we decided to postpone it. That's OK.
1006     collector_state()->set_initiate_conc_mark_if_possible(true);
1007   }
1008 
1009   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
1010 
1011   if (update_stats) {
1012     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
1013     // this is where we update the allocation rate of the application
1014     double app_time_ms =
1015       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
1016     if (app_time_ms < MIN_TIMER_GRANULARITY) {
1017       // This usually happens due to the timer not having the required


1048     }
1049   }
1050 
1051   bool new_in_marking_window = collector_state()->in_marking_window();
1052   bool new_in_marking_window_im = false;
1053   if (last_pause_included_initial_mark) {
1054     new_in_marking_window = true;
1055     new_in_marking_window_im = true;
1056   }
1057 
1058   if (collector_state()->last_young_gc()) {
1059     // This is supposed to be the "last young GC" before we start
1060     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1061 
1062     if (!last_pause_included_initial_mark) {
1063       if (next_gc_should_be_mixed("start mixed GCs",
1064                                   "do not start mixed GCs")) {
1065         collector_state()->set_gcs_are_young(false);
1066       }
1067     } else {
1068       ergo_verbose0(ErgoMixedGCs,
1069                     "do not start mixed GCs",
1070                     ergo_format_reason("concurrent cycle is about to start"));
1071     }
1072     collector_state()->set_last_young_gc(false);
1073   }
1074 
1075   if (!collector_state()->last_gc_was_young()) {
1076     // This is a mixed GC. Here we decide whether to continue doing
1077     // mixed GCs or not.
1078 
1079     if (!next_gc_should_be_mixed("continue mixed GCs",
1080                                  "do not continue mixed GCs")) {
1081       collector_state()->set_gcs_are_young(true);
1082     }
1083   }
1084 
1085   _short_lived_surv_rate_group->start_adding_regions();
1086   // Do that for any other surv rate groups
1087 
1088   if (update_stats) {
1089     double cost_per_card_ms = 0.0;
1090     double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);


1157     }
1158 
1159     _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
1160 
1161     _pending_cards_seq->add((double) _pending_cards);
1162     _rs_lengths_seq->add((double) _max_rs_lengths);
1163   }
1164 
1165   collector_state()->set_in_marking_window(new_in_marking_window);
1166   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1167   _free_regions_at_end_of_collection = _g1->num_free_regions();
1168   update_young_list_max_and_target_length();
1169   update_rs_lengths_prediction();
1170 
1171   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1172   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1173 
1174   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1175 
1176   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1177     ergo_verbose2(ErgoTiming,
1178                   "adjust concurrent refinement thresholds",
1179                   ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
1180                   ergo_format_ms("Update RS time goal")
1181                   ergo_format_ms("Scan HCC time"),
1182                   update_rs_time_goal_ms,
1183                   scan_hcc_time_ms);
1184 
1185     update_rs_time_goal_ms = 0;
1186   } else {
1187     update_rs_time_goal_ms -= scan_hcc_time_ms;
1188   }
1189   adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1190                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1191                                update_rs_time_goal_ms);
1192 
1193   _collectionSetChooser->verify();
1194 }
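// A sketch of the Update RS goal computed above, assuming max_gc_time() is
// 0.2 s (MaxGCPauseMillis = 200) and G1RSetUpdatingPauseTimePercent = 10:
//
//   update_rs_time_goal_ms = 0.2 * MILLIUNITS * 10 / 100.0;   // 20 ms
//
// If Scan HCC averaged 5 ms, refinement is tuned against the remaining 15 ms;
// an average above 20 ms collapses the goal to 0 instead.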
1195 
1196 #define EXT_SIZE_FORMAT "%.1f%s"
1197 #define EXT_SIZE_PARAMS(bytes)                                  \
1198   byte_size_in_proper_unit((double)(bytes)),                    \
1199   proper_unit_for_byte_size((bytes))
1200 
1201 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1202   YoungList* young_list = _g1->young_list();
1203   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1204   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1205   _heap_capacity_bytes_before_gc = _g1->capacity();
1206   _heap_used_bytes_before_gc = _g1->used();
1207   _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
1208 
1209   _eden_capacity_bytes_before_gc =
1210          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1211 
1212   if (full) {
1213     _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1214   }
1215 }
1216 
1217 void G1CollectorPolicy::print_heap_transition(size_t bytes_before) const {
1218   size_t bytes_after = _g1->used();
1219   size_t capacity = _g1->capacity();
1220 
1221   gclog_or_tty->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
1222       byte_size_in_proper_unit(bytes_before),
1223       proper_unit_for_byte_size(bytes_before),
1224       byte_size_in_proper_unit(bytes_after),
1225       proper_unit_for_byte_size(bytes_after),
1226       byte_size_in_proper_unit(capacity),
1227       proper_unit_for_byte_size(capacity));
1228 }
1229 
1230 void G1CollectorPolicy::print_heap_transition() const {
1231   print_heap_transition(_heap_used_bytes_before_gc);
1232 }
1233 
1234 void G1CollectorPolicy::print_detailed_heap_transition(bool full) const {
1235   YoungList* young_list = _g1->young_list();
1236 
1237   size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
1238   size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
1239   size_t heap_used_bytes_after_gc = _g1->used();
1240 
1241   size_t heap_capacity_bytes_after_gc = _g1->capacity();
1242   size_t eden_capacity_bytes_after_gc =
1243     (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
1244 
1245   gclog_or_tty->print(
1246     "   [Eden: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->" EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ") "
1247     "Survivors: " EXT_SIZE_FORMAT "->" EXT_SIZE_FORMAT " "
1248     "Heap: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->"
1249     EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")]",
1250     EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
1251     EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
1252     EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
1253     EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
1254     EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
1255     EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
1256     EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
1257     EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
1258     EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
1259     EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
1260 
1261   if (full) {
1262     MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
1263   }
1264 
1265   gclog_or_tty->cr();
1266 }
1267 
1268 void G1CollectorPolicy::print_phases(double pause_time_sec) {
1269   phase_times()->print(pause_time_sec);
1270 }
1271 
1272 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1273                                                      double update_rs_processed_buffers,
1274                                                      double goal_ms) {
1275   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1276   ConcurrentG1Refine* cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1277 
1278   if (G1UseAdaptiveConcRefinement) {
1279     const int k_gy = 3, k_gr = 6;
1280     const double inc_k = 1.1, dec_k = 0.9;
1281 
1282     int g = cg1r->green_zone();
1283     if (update_rs_time > goal_ms) {
1284       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean mutator-only processing.
1285     } else {


1500 
1501 size_t G1CollectorPolicy::expansion_amount() const {
1502   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1503   double threshold = _gc_overhead_perc;
1504   if (recent_gc_overhead > threshold) {
1505     // We will double the existing space, or take
1506     // G1ExpandByPercentOfAvailable % of the available expansion
1507     // space, whichever is smaller, bounded below by a minimum
1508     // expansion (unless that's all that's left.)
1509     const size_t min_expand_bytes = 1*M;
1510     size_t reserved_bytes = _g1->max_capacity();
1511     size_t committed_bytes = _g1->capacity();
1512     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1513     size_t expand_bytes;
1514     size_t expand_bytes_via_pct =
1515       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1516     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1517     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1518     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1519 
1520     ergo_verbose5(ErgoHeapSizing,
1521                   "attempt heap expansion",
1522                   ergo_format_reason("recent GC overhead higher than "
1523                                      "threshold after GC")
1524                   ergo_format_perc("recent GC overhead")
1525                   ergo_format_perc("threshold")
1526                   ergo_format_byte("uncommitted")
1527                   ergo_format_byte_perc("calculated expansion amount"),
1528                   recent_gc_overhead, threshold,
1529                   uncommitted_bytes,
1530                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
1531 
1532     return expand_bytes;
1533   } else {
1534     return 0;
1535   }
1536 }
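// A worked example of the clamping above, assuming 2 GB reserved, 1 GB
// committed, and the default G1ExpandByPercentOfAvailable of 20:
//
//   uncommitted_bytes    = 2*G - 1*G;                          // 1 GB
//   expand_bytes_via_pct = uncommitted_bytes * 20 / 100;       // ~205 MB
//   expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes); // ~205 MB
//   expand_bytes = MAX2(expand_bytes, min_expand_bytes);        // unchanged
//   expand_bytes = MIN2(expand_bytes, uncommitted_bytes);       // final: ~205 MB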
1537 
1538 void G1CollectorPolicy::print_tracing_info() const {
1539   _trace_young_gen_time_data.print();
1540   _trace_old_gen_time_data.print();
1541 }
1542 
1543 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1544 #ifndef PRODUCT
1545   _short_lived_surv_rate_group->print_surv_rate_summary();
1546   // add this call for any other surv rate groups
1547 #endif // PRODUCT
1548 }
1549 
1550 bool G1CollectorPolicy::is_young_list_full() const {


1573   _young_list_max_length = _young_list_target_length + expansion_region_num;
1574   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
1575 }
1576 
1577 // Calculates survivor space parameters.
1578 void G1CollectorPolicy::update_survivors_policy() {
1579   double max_survivor_regions_d =
1580                  (double) _young_list_target_length / (double) SurvivorRatio;
1581   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1582   // smaller than 1.0) we'll get 1.
1583   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1584 
1585   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1586         HeapRegion::GrainWords * _max_survivor_regions, counters());
1587 }
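// For example, with a young target of 10 regions and the default
// SurvivorRatio of 8, ceil(10 / 8.0) caps survivors at 2 regions; even a
// fractional quotient such as 0.25 still yields 1, as the comment intends.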
1588 
1589 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
1590                                                      GCCause::Cause gc_cause) {
1591   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1592   if (!during_cycle) {
1593     ergo_verbose1(ErgoConcCycles,
1594                   "request concurrent cycle initiation",
1595                   ergo_format_reason("requested by GC cause")
1596                   ergo_format_str("GC cause"),
1597                   GCCause::to_string(gc_cause));
1598     collector_state()->set_initiate_conc_mark_if_possible(true);
1599     return true;
1600   } else {
1601     ergo_verbose1(ErgoConcCycles,
1602                   "do not request concurrent cycle initiation",
1603                   ergo_format_reason("concurrent cycle already in progress")
1604                   ergo_format_str("GC cause"),
1605                   GCCause::to_string(gc_cause));
1606     return false;
1607   }
1608 }
1609 
1610 void
1611 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1612   // We are about to decide on whether this pause will be an
1613   // initial-mark pause.
1614 
1615   // First, collector_state()->during_initial_mark_pause() should not be already set. We
1616   // will set it here if we have to. However, it should be cleared by
1617   // the end of the pause (it's only set for the duration of an
1618   // initial-mark pause).
1619   assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1620 
1621   if (collector_state()->initiate_conc_mark_if_possible()) {
1622     // We had noticed on a previous pause that the heap occupancy has
1623     // gone over the initiating threshold and we should start a
1624     // concurrent marking cycle. So we might initiate one.
1625 
1626     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1627     if (!during_cycle) {
1628       // The concurrent marking thread is not "during a cycle", i.e.,
1629       // it has completed the last one. So we can go ahead and
1630       // initiate a new cycle.
1631 
1632       collector_state()->set_during_initial_mark_pause(true);
1633       // We do not allow mixed GCs during marking.
1634       if (!collector_state()->gcs_are_young()) {
1635         collector_state()->set_gcs_are_young(true);
1636         ergo_verbose0(ErgoMixedGCs,
1637                       "end mixed GCs",
1638                       ergo_format_reason("concurrent cycle is about to start"));
1639       }
1640 
1641       // And we can now clear initiate_conc_mark_if_possible() as
1642       // we've already acted on it.
1643       collector_state()->set_initiate_conc_mark_if_possible(false);
1644 
1645       ergo_verbose0(ErgoConcCycles,
1646                   "initiate concurrent cycle",
1647                   ergo_format_reason("concurrent cycle initiation requested"));
1648     } else {
1649       // The concurrent marking thread is still finishing up the
1650       // previous cycle. If we start one right now the two cycles
1651       // overlap. In particular, the concurrent marking thread might
1652       // be in the process of clearing the next marking bitmap (which
1653       // we will use for the next cycle if we start one). Starting a
1654       // cycle now will be bad given that parts of the marking
1655       // information might get cleared by the marking thread. And we
1656       // cannot wait for the marking thread to finish the cycle as it
1657       // periodically yields while clearing the next marking bitmap
1658       // and, if it's in a yield point, it's waiting for us to
1659       // finish. So, at this point we will not start a cycle and we'll
1660       // let the concurrent marking thread complete the last one.
1661       ergo_verbose0(ErgoConcCycles,
1662                     "do not initiate concurrent cycle",
1663                     ergo_format_reason("concurrent cycle already in progress"));
1664     }
1665   }
1666 }
1667 
1668 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1669   G1CollectedHeap* _g1h;
1670   CSetChooserParUpdater _cset_updater;
1671 
1672 public:
1673   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1674                            uint chunk_size) :
1675     _g1h(G1CollectedHeap::heap()),
1676     _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1677 
1678   bool doHeapRegion(HeapRegion* r) {
1679     // Do we have any marking information for this region?
1680     if (r->is_marked()) {
1681       // We will skip any region that's currently used as an old GC
1682       // alloc region (we should not consider those for collection
1683       // before we fill them up).


1928                  HR_FORMAT_PARAMS(csr),
1929                  p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
1930                  csr->age_in_surv_rate_group_cond());
1931     csr = next;
1932   }
1933 }
1934 #endif // !PRODUCT
1935 
1936 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
1937   // Returns the given amount of reclaimable bytes (that represents
1938   // the amount of reclaimable space still to be collected) as a
1939   // percentage of the current heap capacity.
1940   size_t capacity_bytes = _g1->capacity();
1941   return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1942 }
1943 
1944 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1945                                                 const char* false_action_str) const {
1946   CollectionSetChooser* cset_chooser = _collectionSetChooser;
1947   if (cset_chooser->is_empty()) {
1948     ergo_verbose0(ErgoMixedGCs,
1949                   false_action_str,
1950                   ergo_format_reason("candidate old regions not available"));
1951     return false;
1952   }
1953 
1954   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1955   size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1956   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1957   double threshold = (double) G1HeapWastePercent;
1958   if (reclaimable_perc <= threshold) {
1959     ergo_verbose4(ErgoMixedGCs,
1960               false_action_str,
1961               ergo_format_reason("reclaimable percentage not over threshold")
1962               ergo_format_region("candidate old regions")
1963               ergo_format_byte_perc("reclaimable")
1964               ergo_format_perc("threshold"),
1965               cset_chooser->remaining_regions(),
1966               reclaimable_bytes,
1967               reclaimable_perc, threshold);
1968     return false;
1969   }
1970 
1971   ergo_verbose4(ErgoMixedGCs,
1972                 true_action_str,
1973                 ergo_format_reason("candidate old regions available")
1974                 ergo_format_region("candidate old regions")
1975                 ergo_format_byte_perc("reclaimable")
1976                 ergo_format_perc("threshold"),
1977                 cset_chooser->remaining_regions(),
1978                 reclaimable_bytes,
1979                 reclaimable_perc, threshold);
1980   return true;
1981 }
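// A worked example of the waste check above: with 100 MB still reclaimable on
// a 1 GB heap, reclaimable_bytes_perc() reports ~9.8%; assuming
// G1HeapWastePercent is 5, that is above threshold, so mixed GCs continue.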
1982 
1983 uint G1CollectorPolicy::calc_min_old_cset_length() const {
1984   // The min old CSet region bound is based on the maximum desired
1985   // number of mixed GCs after a cycle. I.e., even if some old regions
1986   // look expensive, we should add them to the CSet anyway to make
1987   // sure we go through the available old regions in no more than the
1988   // maximum desired number of mixed GCs.
1989   //
1990   // The calculation is based on the number of marked regions we added
1991   // to the CSet chooser in the first place, not how many remain, so
1992   // that the result is the same during all mixed GCs that follow a cycle.
1993 
1994   const size_t region_num = (size_t) _collectionSetChooser->length();
1995   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1996   size_t result = region_num / gc_num;
1997   // emulate ceiling
1998   if (result * gc_num < region_num) {
1999     result += 1;


2015   if (100 * result < region_num * perc) {
2016     result += 1;
2017   }
2018   return (uint) result;
2019 }
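// For instance, in calc_min_old_cset_length() above, with 10 regions added to
// the CSet chooser and an illustrative G1MixedGCCountTarget of 4:
//
//   result = 10 / 4;   // 2; then 2 * 4 < 10, so result becomes 3,
//                      // i.e. the ceiling of region_num / gc_num.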
2020 
2021 
2022 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
2023   double young_start_time_sec = os::elapsedTime();
2024 
2025   YoungList* young_list = _g1->young_list();
2026   finalize_incremental_cset_building();
2027 
2028   guarantee(target_pause_time_ms > 0.0,
2029             "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
2030   guarantee(_collection_set == NULL, "Precondition");
2031 
2032   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2033   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
2034 
2035   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
2036                 "start choosing CSet",
2037                 ergo_format_size("_pending_cards")
2038                 ergo_format_ms("predicted base time")
2039                 ergo_format_ms("remaining time")
2040                 ergo_format_ms("target pause time"),
2041                 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
2042 
2043   collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
2044 
2045   if (collector_state()->last_gc_was_young()) {
2046     _trace_young_gen_time_data.increment_young_collection_count();
2047   } else {
2048     _trace_young_gen_time_data.increment_mixed_collection_count();
2049   }
2050 
2051   // The young list is laid out so that the survivor regions from the
2052   // previous pause are appended to the RHS of the young list, i.e.
2053   //   [Newly Young Regions ++ Survivors from last pause].
2054 
2055   uint survivor_region_length = young_list->survivor_length();
2056   uint eden_region_length = young_list->eden_length();
2057   init_cset_region_lengths(eden_region_length, survivor_region_length);
2058 
2059   HeapRegion* hr = young_list->first_survivor_region();
2060   while (hr != NULL) {
2061     assert(hr->is_survivor(), "badly formed young list");
2062     // There is a convention that all the young regions in the CSet
2063     // are tagged as "eden", so we do this for the survivors here. We
2064     // use the special set_eden_pre_gc() as it doesn't check that the
2065     // region is free (which is not the case here).
2066     hr->set_eden_pre_gc();
2067     hr = hr->get_next_young_region();
2068   }
2069 
2070   // Clear the fields that point to the survivor list - they are all young now.
2071   young_list->clear_survivors();
2072 
2073   _collection_set = _inc_cset_head;
2074   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
2075   time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
2076 
2077   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
2078                 "add young regions to CSet",
2079                 ergo_format_region("eden")
2080                 ergo_format_region("survivors")
2081                 ergo_format_ms("predicted young region time")
2082                 ergo_format_ms("target pause time"),
2083                 eden_region_length, survivor_region_length,
2084                 _inc_cset_predicted_elapsed_time_ms,
2085                 target_pause_time_ms);
2086 
2087   // The number of recorded young regions is the incremental
2088   // collection set's current size
2089   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
2090 
2091   double young_end_time_sec = os::elapsedTime();
2092   phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
2093 
2094   return time_remaining_ms;
2095 }
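// A sketch of the time budget threaded through above: with a 200 ms target
// and a predicted base time of 60 ms, time_remaining_ms starts at 140 ms,
// is reduced by the predicted young region time, and whatever is left is
// handed to finalize_old_cset_part() below to spend on old regions.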
2096 
2097 void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
2098   double non_young_start_time_sec = os::elapsedTime();
2099   double predicted_old_time_ms = 0.0;
2100 
2101 
2102   if (!collector_state()->gcs_are_young()) {
2103     CollectionSetChooser* cset_chooser = _collectionSetChooser;
2104     cset_chooser->verify();
2105     const uint min_old_cset_length = calc_min_old_cset_length();
2106     const uint max_old_cset_length = calc_max_old_cset_length();
2107 
2108     uint expensive_region_num = 0;
2109     bool check_time_remaining = adaptive_young_list_length();
2110 
2111     HeapRegion* hr = cset_chooser->peek();
2112     while (hr != NULL) {
2113       if (old_cset_region_length() >= max_old_cset_length) {
2114         // Added maximum number of old regions to the CSet.
2115         ergo_verbose2(ErgoCSetConstruction,
2116                       "finish adding old regions to CSet",
2117                       ergo_format_reason("old CSet region num reached max")
2118                       ergo_format_region("old")
2119                       ergo_format_region("max"),
2120                       old_cset_region_length(), max_old_cset_length);
2121         break;
2122       }
2123 
2124 
2125       // Stop adding regions if the remaining reclaimable space is
2126       // not above G1HeapWastePercent.
2127       size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
2128       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2129       double threshold = (double) G1HeapWastePercent;
2130       if (reclaimable_perc <= threshold) {
2131         // We've added enough old regions that the amount of uncollected
2132         // reclaimable space is at or below the waste threshold. Stop
2133         // adding old regions to the CSet.
2134         ergo_verbose5(ErgoCSetConstruction,
2135                       "finish adding old regions to CSet",
2136                       ergo_format_reason("reclaimable percentage not over threshold")
2137                       ergo_format_region("old")
2138                       ergo_format_region("max")
2139                       ergo_format_byte_perc("reclaimable")
2140                       ergo_format_perc("threshold"),
2141                       old_cset_region_length(),
2142                       max_old_cset_length,
2143                       reclaimable_bytes,
2144                       reclaimable_perc, threshold);
2145         break;
2146       }
2147 
2148       double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
2149       if (check_time_remaining) {
2150         if (predicted_time_ms > time_remaining_ms) {
2151           // Too expensive for the current CSet.
2152 
2153           if (old_cset_region_length() >= min_old_cset_length) {
2154             // We have added the minimum number of old regions to the CSet,
2155             // we are done with this CSet.
2156             ergo_verbose4(ErgoCSetConstruction,
2157                           "finish adding old regions to CSet",
2158                           ergo_format_reason("predicted time is too high")
2159                           ergo_format_ms("predicted time")
2160                           ergo_format_ms("remaining time")
2161                           ergo_format_region("old")
2162                           ergo_format_region("min"),
2163                           predicted_time_ms, time_remaining_ms,
2164                           old_cset_region_length(), min_old_cset_length);
2165             break;
2166           }
2167 
2168           // We'll add it anyway given that we haven't reached the
2169           // minimum number of old regions.
2170           expensive_region_num += 1;
2171         }
2172       } else {
2173         if (old_cset_region_length() >= min_old_cset_length) {
2174           // In the non-auto-tuning case, we'll finish adding regions
2175           // to the CSet if we reach the minimum.
2176           ergo_verbose2(ErgoCSetConstruction,
2177                         "finish adding old regions to CSet",
2178                         ergo_format_reason("old CSet region num reached min")
2179                         ergo_format_region("old")
2180                         ergo_format_region("min"),
2181                         old_cset_region_length(), min_old_cset_length);
2182           break;
2183         }
2184       }
2185 
2186       // We will add this region to the CSet.
2187       time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
2188       predicted_old_time_ms += predicted_time_ms;
2189       cset_chooser->pop(); // already have region via peek()
2190       _g1->old_set_remove(hr);
2191       add_old_region_to_cset(hr);
2192 
2193       hr = cset_chooser->peek();
2194     }
2195     if (hr == NULL) {
2196       ergo_verbose0(ErgoCSetConstruction,
2197                     "finish adding old regions to CSet",
2198                     ergo_format_reason("candidate old regions not available"));
2199     }
2200 
2201     if (expensive_region_num > 0) {
2202       // We print the information once here at the end, predicated on
2203       // whether we added any apparently expensive regions or not, to
2204       // avoid generating output per region.
2205       ergo_verbose4(ErgoCSetConstruction,
2206                     "added expensive regions to CSet",
2207                     ergo_format_reason("old CSet region num not reached min")
2208                     ergo_format_region("old")
2209                     ergo_format_region("expensive")
2210                     ergo_format_region("min")
2211                     ergo_format_ms("remaining time"),
2212                     old_cset_region_length(),
2213                     expensive_region_num,
2214                     min_old_cset_length,
2215                     time_remaining_ms);
2216     }
2217 
2218     cset_chooser->verify();
2219   }
2220 
2221   stop_incremental_cset_building();
2222 
2223   ergo_verbose3(ErgoCSetConstruction,
2224                 "finish choosing CSet",
2225                 ergo_format_region("old")
2226                 ergo_format_ms("predicted old region time")
2227                 ergo_format_ms("time remaining"),
2228                 old_cset_region_length(),
2229                 predicted_old_time_ms, time_remaining_ms);
2230 
2231   double non_young_end_time_sec = os::elapsedTime();
2232   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2233 }
2234 
2235 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
2236   if (TraceYoungGenTime) {
2237     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2238   }
2239 }
2240 
2241 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
2242   if (TraceYoungGenTime) {
2243     _all_yield_times_ms.add(yield_time_ms);
2244   }
2245 }
2246 
2247 void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2248   if (TraceYoungGenTime) {
2249     _total.add(pause_time_ms);


2268     _parallel_other.add(parallel_other_time);
2269     _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2270   }
2271 }
2272 
2273 void TraceYoungGenTimeData::increment_young_collection_count() {
2274   if (TraceYoungGenTime) {
2275     ++_young_pause_num;
2276   }
2277 }
2278 
2279 void TraceYoungGenTimeData::increment_mixed_collection_count() {
2280   if (TraceYoungGenTime) {
2281     ++_mixed_pause_num;
2282   }
2283 }
2284 
2285 void TraceYoungGenTimeData::print_summary(const char* str,
2286                                           const NumberSeq* seq) const {
2287   double sum = seq->sum();
2288   gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2289                 str, sum / 1000.0, seq->avg());
2290 }
2291 
2292 void TraceYoungGenTimeData::print_summary_sd(const char* str,
2293                                              const NumberSeq* seq) const {
2294   print_summary(str, seq);
2295   gclog_or_tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2296                 "(num", seq->num(), seq->sd(), seq->maximum());
2297 }
2298 
2299 void TraceYoungGenTimeData::print() const {
2300   if (!TraceYoungGenTime) {
2301     return;
2302   }
2303 
2304   gclog_or_tty->print_cr("ALL PAUSES");
2305   print_summary_sd("   Total", &_total);
2306   gclog_or_tty->cr();
2307   gclog_or_tty->cr();
2308   gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
2309   gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
2310   gclog_or_tty->cr();
2311 
2312   gclog_or_tty->print_cr("EVACUATION PAUSES");
2313 
2314   if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2315     gclog_or_tty->print_cr("none");
2316   } else {
2317     print_summary_sd("   Evacuation Pauses", &_total);
2318     print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
2319     print_summary("      Parallel Time", &_parallel);
2320     print_summary("         Ext Root Scanning", &_ext_root_scan);
2321     print_summary("         SATB Filtering", &_satb_filtering);
2322     print_summary("         Update RS", &_update_rs);
2323     print_summary("         Scan RS", &_scan_rs);
2324     print_summary("         Object Copy", &_obj_copy);
2325     print_summary("         Termination", &_termination);
2326     print_summary("         Parallel Other", &_parallel_other);
2327     print_summary("      Clear CT", &_clear_ct);
2328     print_summary("      Other", &_other);
2329   }
2330   gclog_or_tty->cr();
2331 
2332   gclog_or_tty->print_cr("MISC");
2333   print_summary_sd("   Stop World", &_all_stop_world_times_ms);
2334   print_summary_sd("   Yields", &_all_yield_times_ms);
2335 }
2336 
2337 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2338   if (TraceOldGenTime) {
2339     _all_full_gc_times.add(full_gc_time_ms);
2340   }
2341 }
2342 
2343 void TraceOldGenTimeData::print() const {
2344   if (!TraceOldGenTime) {
2345     return;
2346   }
2347 
2348   if (_all_full_gc_times.num() > 0) {
2349     gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2350       _all_full_gc_times.num(),
2351       _all_full_gc_times.sum() / 1000.0);
2352     gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2353     gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
2354       _all_full_gc_times.sd(),
2355       _all_full_gc_times.maximum());
2356   }
2357 }


src/share/vm/gc/g1/g1CollectorPolicy.cpp

  11  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/concurrentG1Refine.hpp"
  27 #include "gc/g1/concurrentMark.hpp"
  28 #include "gc/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorPolicy.hpp"

  31 #include "gc/g1/g1GCPhaseTimes.hpp"

  32 #include "gc/g1/heapRegion.inline.hpp"
  33 #include "gc/g1/heapRegionRemSet.hpp"
  34 #include "gc/shared/gcPolicyCounters.hpp"
  35 #include "logging/log.hpp"
  36 #include "runtime/arguments.hpp"
  37 #include "runtime/java.hpp"
  38 #include "runtime/mutexLocker.hpp"
  39 #include "utilities/debug.hpp"
  40 
  41 // Different defaults for different numbers of GC threads
  42 // They were chosen by running GCOld and SPECjbb on debris with different
  43 //   numbers of GC threads and choosing them based on the results
  44 
  45 // all the same
  46 static double rs_length_diff_defaults[] = {
  47   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
  48 };
  49 
  50 static double cost_per_card_ms_defaults[] = {
  51   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
  52 };
  53 
  54 // all the same
  55 static double young_cards_per_entry_ratio_defaults[] = {


 101   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 102   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 103   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 104   _non_young_other_cost_per_region_ms_seq(
 105                                          new TruncatedSeq(TruncatedSeqLength)),
 106 
 107   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 108   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 109 
 110   _pause_time_target_ms((double) MaxGCPauseMillis),
 111 
 112   _recent_prev_end_times_for_all_gcs_sec(
 113                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 114 
 115   _recent_avg_pause_time_ratio(0.0),
 116   _rs_lengths_prediction(0),
 117   _max_survivor_regions(0),
 118 
 119   _eden_used_bytes_before_gc(0),
 120   _survivor_used_bytes_before_gc(0),
 121   _old_used_bytes_before_gc(0),
 122   _heap_used_bytes_before_gc(0),
 123   _metaspace_used_bytes_before_gc(0),
 124   _eden_capacity_bytes_before_gc(0),
 125   _heap_capacity_bytes_before_gc(0),
 126 
 127   _eden_cset_region_length(0),
 128   _survivor_cset_region_length(0),
 129   _old_cset_region_length(0),
 130 
 131   _collection_set(NULL),
 132   _collection_set_bytes_used_before(0),
 133 
 134   // Incremental CSet attributes
 135   _inc_cset_build_state(Inactive),
 136   _inc_cset_head(NULL),
 137   _inc_cset_tail(NULL),
 138   _inc_cset_bytes_used_before(0),
 139   _inc_cset_max_finger(NULL),
 140   _inc_cset_recorded_rs_lengths(0),
 141   _inc_cset_recorded_rs_lengths_diffs(0),


 154   // indirectly use it through this object passed to their constructor.
 155   _short_lived_surv_rate_group =
 156     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
 157   _survivor_surv_rate_group =
 158     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
 159 
 160   // Set up the region size and associated fields. Given that the
 161   // policy is created before the heap, we have to set this up here,
 162   // so it's done as soon as possible.
 163 
 164   // It would have been natural to pass initial_heap_byte_size() and
 165   // max_heap_byte_size() to setup_heap_region_size() but those have
 166   // not been set up at this point since they should be aligned with
 167   // the region size. So, there is a circular dependency here. We base
 168   // the region size on the heap size, but the heap size should be
 169   // aligned with the region size. To get around this we use the
 170   // unaligned values for the heap.
 171   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
 172   HeapRegionRemSet::setup_remset_size();
 173 
 174   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
 175   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 176 
 177   _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
 178 
 179   int index = MIN2(_parallel_gc_threads - 1, 7);
 180 
 181   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
 182   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
 183   _cost_scan_hcc_seq->add(0.0);
 184   _young_cards_per_entry_ratio_seq->add(
 185                                   young_cards_per_entry_ratio_defaults[index]);
 186   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
 187   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
 188   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
 189   _young_other_cost_per_region_ms_seq->add(
 190                                young_other_cost_per_region_ms_defaults[index]);
 191   _non_young_other_cost_per_region_ms_seq->add(
 192                            non_young_other_cost_per_region_ms_defaults[index]);
 193 


 733   HeapRegion* head = _g1->young_list()->first_region();
 734   return
 735     verify_young_ages(head, _short_lived_surv_rate_group);
 736   // also call verify_young_ages on any additional surv rate groups
 737 }
 738 
 739 bool
 740 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
  741                                      SurvRateGroup* surv_rate_group) {
  742   guarantee(surv_rate_group != NULL, "pre-condition");
 743 
 744   const char* name = surv_rate_group->name();
 745   bool ret = true;
 746   int prev_age = -1;
 747 
 748   for (HeapRegion* curr = head;
 749        curr != NULL;
 750        curr = curr->get_next_young_region()) {
 751     SurvRateGroup* group = curr->surv_rate_group();
 752     if (group == NULL && !curr->is_survivor()) {
 753       log_info(gc, verify)("## %s: encountered NULL surv_rate_group", name);
 754       ret = false;
 755     }
 756 
 757     if (surv_rate_group == group) {
 758       int age = curr->age_in_surv_rate_group();
 759 
 760       if (age < 0) {
 761         log_info(gc, verify)("## %s: encountered negative age", name);
 762         ret = false;
 763       }
 764 
 765       if (age <= prev_age) {
 766         log_info(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
 767         ret = false;
 768       }
 769       prev_age = age;
 770     }
 771   }
 772 
 773   return ret;
 774 }
 775 #endif // PRODUCT
 776 
 777 void G1CollectorPolicy::record_full_collection_start() {
 778   _full_collection_start_sec = os::elapsedTime();
 779   record_heap_size_info_at_start(true /* full */);
 780   // Release the future to-space so that it is available for compaction into.
 781   collector_state()->set_full_collection(true);
 782 }
 783 
 784 void G1CollectorPolicy::record_full_collection_end() {
 785   // Consider this like a collection pause for the purposes of allocation
 786   // since last pause.


 912          average_time_ms(G1GCPhaseTimes::ObjCopy) -
 913          average_time_ms(G1GCPhaseTimes::Termination);
 914 }
 915 
 916 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
 917   return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
 918 }
 919 
 920 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 921   if (_g1->concurrent_mark()->cmThread()->during_cycle()) {
 922     return false;
 923   }
 924 
 925   size_t marking_initiating_used_threshold =
 926     (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
 927   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 928   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 929 
 930   if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
 931     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
 932       log_debug(gc, ergo, conc)("Request concurrent cycle initiation (occupancy higher than threshold)"
 933                                 "occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (" UINTX_FORMAT "%%) source: %s",
 934                                 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, InitiatingHeapOccupancyPercent, source);
 935       return true;
 936     } else {
 937       log_debug(gc, ergo, conc)("Do not request concurrent cycle initiation (still doing mixed collections)"
 938                                 "occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (" UINTX_FORMAT "%%) source: %s",
 939                                 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, InitiatingHeapOccupancyPercent, source);    }
 940   }
 941 
 942   return false;
 943 }
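// The unified-logging calls above replace the ergo_verbose* output seen in
// the old version of this file; e.g. -Xlog:gc+ergo+conc=debug (JDK 9 -Xlog
// syntax) enables these messages at run time.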
 944 
 945 // Timer measurements below this granularity are considered to be zero
 946 #define MIN_TIMER_GRANULARITY 0.0000001
 947 
 948 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
 949   double end_time_sec = os::elapsedTime();
 950   assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
 951          "otherwise, the subtraction below does not make sense");
 952   size_t rs_size =
 953             _cur_collection_pause_used_regions_at_start - cset_region_length();
 954   size_t cur_used_bytes = _g1->used();
 955   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
 956   bool last_pause_included_initial_mark = false;
 957   bool update_stats = !_g1->evacuation_failed();
 958 
 959   NOT_PRODUCT(_short_lived_surv_rate_group->print());
 960 
 961   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
 962   if (last_pause_included_initial_mark) {
 963     record_concurrent_mark_init_end(0.0);
 964   } else if (need_to_start_conc_mark("end of GC")) {
 965     // Note: this might have already been set, if during the last
 966     // pause we decided to start a cycle but at the beginning of
 967     // this pause we decided to postpone it. That's OK.
 968     collector_state()->set_initiate_conc_mark_if_possible(true);
 969   }
 970 
 971   _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
 972 
 973   if (update_stats) {
 974     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
 975     // this is where we update the allocation rate of the application
 976     double app_time_ms =
 977       (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
 978     if (app_time_ms < MIN_TIMER_GRANULARITY) {
 979       // This usually happens due to the timer not having the required


1010     }
1011   }
1012 
1013   bool new_in_marking_window = collector_state()->in_marking_window();
1014   bool new_in_marking_window_im = false;
1015   if (last_pause_included_initial_mark) {
1016     new_in_marking_window = true;
1017     new_in_marking_window_im = true;
1018   }
1019 
1020   if (collector_state()->last_young_gc()) {
 1021     // This is supposed to be the "last young GC" before we start
1022     // doing mixed GCs. Here we decide whether to start mixed GCs or not.
1023 
1024     if (!last_pause_included_initial_mark) {
1025       if (next_gc_should_be_mixed("start mixed GCs",
1026                                   "do not start mixed GCs")) {
1027         collector_state()->set_gcs_are_young(false);
1028       }
1029     } else {
1030       log_debug(gc, ergo)("Do not start mixed GCs (concurrent cycle is about to start)");
1031     }
1032     collector_state()->set_last_young_gc(false);
1033   }
1034 
1035   if (!collector_state()->last_gc_was_young()) {
1036     // This is a mixed GC. Here we decide whether to continue doing
1037     // mixed GCs or not.
1038 
1039     if (!next_gc_should_be_mixed("continue mixed GCs",
1040                                  "do not continue mixed GCs")) {
1041       collector_state()->set_gcs_are_young(true);
1042     }
1043   }
1044 
1045   _short_lived_surv_rate_group->start_adding_regions();
 1046   // Do the same for any other surv rate groups
1047 
1048   if (update_stats) {
1049     double cost_per_card_ms = 0.0;
1050     double cost_scan_hcc = average_time_ms(G1GCPhaseTimes::ScanHCC);
1117     }
1118 
1119     _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
1120 
1121     _pending_cards_seq->add((double) _pending_cards);
1122     _rs_lengths_seq->add((double) _max_rs_lengths);
1123   }
1124 
1125   collector_state()->set_in_marking_window(new_in_marking_window);
1126   collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1127   _free_regions_at_end_of_collection = _g1->num_free_regions();
1128   update_young_list_max_and_target_length();
1129   update_rs_lengths_prediction();
1130 
1131   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1132   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1133 
1134   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1135 
1136   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
 1137     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
1138                                 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
1139                                 update_rs_time_goal_ms, scan_hcc_time_ms);
1140 
1141     update_rs_time_goal_ms = 0;
1142   } else {
1143     update_rs_time_goal_ms -= scan_hcc_time_ms;
1144   }
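  // For illustration, with the defaults MaxGCPauseMillis = 200 and
  // G1RSetUpdatingPauseTimePercent = 10, the goal computed above is
  // 200ms * 10 / 100 = 20ms; a predicted 5ms of HCC scanning would leave
  // the refinement threads a 15ms Update RS budget in the call below.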
1145   adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1146                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1147                                update_rs_time_goal_ms);
1148 
1149   _collectionSetChooser->verify();
1150 }
1151 
1152 #define EXT_SIZE_FORMAT "%.1f%s"
1153 #define EXT_SIZE_PARAMS(bytes)                                  \
1154   byte_size_in_proper_unit((double)(bytes)),                    \
1155   proper_unit_for_byte_size((bytes))
1156 
1157 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1158   YoungList* young_list = _g1->young_list();
1159   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1160   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1161   _heap_capacity_bytes_before_gc = _g1->capacity();
1162   _heap_used_bytes_before_gc = _g1->used();
1163   _old_used_bytes_before_gc = _heap_used_bytes_before_gc - _survivor_used_bytes_before_gc - _eden_used_bytes_before_gc;
1164   _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
1165 
1166   _eden_capacity_bytes_before_gc =
1167          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
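  // For illustration: with 4M regions, a young target of 128 regions and
  // 24M of live survivor data, the eden capacity reported above is
  // 128 * 4M - 24M = 488M.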
1168 
1169   _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1170 }
1171 
1172 void G1CollectorPolicy::print_detailed_heap_transition() const {
1173   YoungList* young_list = _g1->young_list();
1174 
1175   size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
1176   size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
 1177   size_t old_used_bytes_after_gc = _g1->used() - eden_used_bytes_after_gc - survivor_used_bytes_after_gc;
1178 
1179   size_t heap_capacity_bytes_after_gc = _g1->capacity();
1180   size_t eden_capacity_bytes_after_gc =
1181     (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
1182   size_t survivor_capacity_bytes_after_gc = _max_survivor_regions * HeapRegion::GrainBytes;
1183 
 1184   log_info(gc, heap)("Eden: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
 1185       _eden_used_bytes_before_gc / K, eden_used_bytes_after_gc / K, eden_capacity_bytes_after_gc / K);
1186 
 1187   log_info(gc, heap)("Survivor: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
 1188       _survivor_used_bytes_before_gc / K, survivor_used_bytes_after_gc / K, survivor_capacity_bytes_after_gc / K);
1189 
 1190   log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
 1191       _old_used_bytes_before_gc / K, old_used_bytes_after_gc / K, heap_capacity_bytes_after_gc / K);
1192 
1193   MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
1194 }
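// The transition lines printed above read used-before -> used-after
// (capacity after GC), all in KB, e.g. "Eden: 81920K->0K(86016K)" for an
// eden that was fully evacuated.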
1195 
1196 void G1CollectorPolicy::print_phases(double pause_time_sec) {
1197   phase_times()->print(pause_time_sec);
1198 }
1199 
1200 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1201                                                      double update_rs_processed_buffers,
1202                                                      double goal_ms) {
1203   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1204   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1205 
1206   if (G1UseAdaptiveConcRefinement) {
1207     const int k_gy = 3, k_gr = 6;
1208     const double inc_k = 1.1, dec_k = 0.9;
1209 
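    // Adaptation is multiplicative: the green zone shrinks by ~10% (dec_k)
    // when Update RS overran its pause-time goal, so concurrent refinement
    // starts earlier, and grows by ~10% (inc_k) when the goal was met with
    // buffers to spare; k_gy and k_gr scale the green zone into the yellow
    // and red thresholds.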
1210     int g = cg1r->green_zone();
1211     if (update_rs_time > goal_ms) {
1212       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean a mutator-only processing.
1213     } else {
1428 
1429 size_t G1CollectorPolicy::expansion_amount() const {
1430   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1431   double threshold = _gc_overhead_perc;
1432   if (recent_gc_overhead > threshold) {
1433     // We will double the existing space, or take
1434     // G1ExpandByPercentOfAvailable % of the available expansion
1435     // space, whichever is smaller, bounded below by a minimum
1436     // expansion (unless that's all that's left.)
1437     const size_t min_expand_bytes = 1*M;
1438     size_t reserved_bytes = _g1->max_capacity();
1439     size_t committed_bytes = _g1->capacity();
1440     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1441     size_t expand_bytes;
1442     size_t expand_bytes_via_pct =
1443       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1444     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1445     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1446     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1447 
1448     log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
1449                               "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B calculated expansion amount: " SIZE_FORMAT "B (" INTX_FORMAT "%%)",
1450                               recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes_via_pct, G1ExpandByPercentOfAvailable);
1451 
1452     return expand_bytes;
1453   } else {
1454     return 0;
1455   }
1456 }
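// For illustration, assuming the default G1ExpandByPercentOfAvailable of 20:
// with 4G reserved and 1G committed there is 3G uncommitted, so
// expand_bytes_via_pct = 3G * 20 / 100 ~= 614M, which is below the 1G
// committed bound and above the 1M minimum, so the heap grows by ~614M.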
1457 
1458 void G1CollectorPolicy::print_tracing_info() const {
1459   _trace_young_gen_time_data.print();
1460   _trace_old_gen_time_data.print();
1461 }
1462 
1463 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1464 #ifndef PRODUCT
1465   _short_lived_surv_rate_group->print_surv_rate_summary();
1466   // add this call for any other surv rate groups
1467 #endif // PRODUCT
1468 }
1469 
1470 bool G1CollectorPolicy::is_young_list_full() const {
1493   _young_list_max_length = _young_list_target_length + expansion_region_num;
1494   assert(_young_list_target_length <= _young_list_max_length, "post-condition");
1495 }
1496 
1497 // Calculates survivor space parameters.
1498 void G1CollectorPolicy::update_survivors_policy() {
1499   double max_survivor_regions_d =
1500                  (double) _young_list_target_length / (double) SurvivorRatio;
1501   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1502   // smaller than 1.0) we'll get 1.
1503   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1504 
1505   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1506         HeapRegion::GrainWords * _max_survivor_regions, counters());
1507 }
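// For illustration, with the default SurvivorRatio of 8 and a young target
// of 20 regions, max_survivor_regions_d = 20 / 8 = 2.5, which the ceiling
// rounds up to 3 survivor regions; the tenuring threshold is then computed
// against 3 * HeapRegion::GrainWords words of survivor space.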
1508 
1509 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(
1510                                                      GCCause::Cause gc_cause) {
1511   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1512   if (!during_cycle) {
1513     log_debug(gc, ergo, conc)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
1514     collector_state()->set_initiate_conc_mark_if_possible(true);
1515     return true;
1516   } else {
1517     log_debug(gc, ergo, conc)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
1518     return false;
1519   }
1520 }
1521 
1522 void
1523 G1CollectorPolicy::decide_on_conc_mark_initiation() {
1524   // We are about to decide on whether this pause will be an
1525   // initial-mark pause.
1526 
1527   // First, collector_state()->during_initial_mark_pause() should not be already set. We
1528   // will set it here if we have to. However, it should be cleared by
1529   // the end of the pause (it's only set for the duration of an
1530   // initial-mark pause).
1531   assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1532 
1533   if (collector_state()->initiate_conc_mark_if_possible()) {
1534     // We had noticed on a previous pause that the heap occupancy has
1535     // gone over the initiating threshold and we should start a
1536     // concurrent marking cycle. So we might initiate one.
1537 
1538     bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1539     if (!during_cycle) {
1540       // The concurrent marking thread is not "during a cycle", i.e.,
1541       // it has completed the last one. So we can go ahead and
1542       // initiate a new cycle.
1543 
1544       collector_state()->set_during_initial_mark_pause(true);
1545       // We do not allow mixed GCs during marking.
1546       if (!collector_state()->gcs_are_young()) {
1547         collector_state()->set_gcs_are_young(true);
 1548         log_debug(gc, ergo)("End mixed GCs (concurrent cycle is about to start)");
1549       }
1550 
1551       // And we can now clear initiate_conc_mark_if_possible() as
1552       // we've already acted on it.
1553       collector_state()->set_initiate_conc_mark_if_possible(false);
1554       log_debug(gc, ergo, conc)("Initiate concurrent cycle (concurrent cycle initiation requested)");
1555     } else {
1556       // The concurrent marking thread is still finishing up the
1557       // previous cycle. If we start one right now the two cycles
1558       // overlap. In particular, the concurrent marking thread might
1559       // be in the process of clearing the next marking bitmap (which
1560       // we will use for the next cycle if we start one). Starting a
1561       // cycle now will be bad given that parts of the marking
1562       // information might get cleared by the marking thread. And we
1563       // cannot wait for the marking thread to finish the cycle as it
1564       // periodically yields while clearing the next marking bitmap
1565       // and, if it's in a yield point, it's waiting for us to
1566       // finish. So, at this point we will not start a cycle and we'll
1567       // let the concurrent marking thread complete the last one.
1568       log_debug(gc, ergo, conc)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
1569     }
1570   }
1571 }
1572 
1573 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1574   G1CollectedHeap* _g1h;
1575   CSetChooserParUpdater _cset_updater;
1576 
1577 public:
1578   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1579                            uint chunk_size) :
1580     _g1h(G1CollectedHeap::heap()),
1581     _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1582 
1583   bool doHeapRegion(HeapRegion* r) {
1584     // Do we have any marking information for this region?
1585     if (r->is_marked()) {
1586       // We will skip any region that's currently used as an old GC
1587       // alloc region (we should not consider those for collection
1588       // before we fill them up).
1833                  HR_FORMAT_PARAMS(csr),
1834                  p2i(csr->prev_top_at_mark_start()), p2i(csr->next_top_at_mark_start()),
1835                  csr->age_in_surv_rate_group_cond());
1836     csr = next;
1837   }
1838 }
1839 #endif // !PRODUCT
1840 
1841 double G1CollectorPolicy::reclaimable_bytes_perc(size_t reclaimable_bytes) const {
1842   // Returns the given amount of reclaimable bytes (that represents
1843   // the amount of reclaimable space still to be collected) as a
1844   // percentage of the current heap capacity.
1845   size_t capacity_bytes = _g1->capacity();
1846   return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1847 }
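// For illustration: 512M of reclaimable space on an 8G heap is
// 512 * 100 / 8192 = 6.25%, still above the default G1HeapWastePercent of 5,
// so a mixed GC would still be considered worthwhile.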
1848 
1849 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1850                                                 const char* false_action_str) const {
1851   CollectionSetChooser* cset_chooser = _collectionSetChooser;
1852   if (cset_chooser->is_empty()) {
1853     log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1854     return false;
1855   }
1856 
1857   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1858   size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1859   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1860   double threshold = (double) G1HeapWastePercent;
1861   if (reclaimable_perc <= threshold) {
 1862     log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
1863                         false_action_str, cset_chooser->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
1864     return false;
1865   }
1866 
 1867   log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
1868                       true_action_str, cset_chooser->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
1869   return true;
1870 }
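// In other words, with the default G1HeapWastePercent of 5, mixed GCs keep
// going only while the remaining candidate regions hold more than 5% of the
// heap capacity in reclaimable space; below that, the pause time is judged
// not worth the space reclaimed.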
1871 
1872 uint G1CollectorPolicy::calc_min_old_cset_length() const {
1873   // The min old CSet region bound is based on the maximum desired
1874   // number of mixed GCs after a cycle. I.e., even if some old regions
1875   // look expensive, we should add them to the CSet anyway to make
1876   // sure we go through the available old regions in no more than the
1877   // maximum desired number of mixed GCs.
1878   //
1879   // The calculation is based on the number of marked regions we added
1880   // to the CSet chooser in the first place, not how many remain, so
1881   // that the result is the same during all mixed GCs that follow a cycle.
1882 
1883   const size_t region_num = (size_t) _collectionSetChooser->length();
1884   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1885   size_t result = region_num / gc_num;
1886   // emulate ceiling
1887   if (result * gc_num < region_num) {
1888     result += 1;
1904   if (100 * result < region_num * perc) {
1905     result += 1;
1906   }
1907   return (uint) result;
1908 }
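// Both bounds emulate ceil() with integer arithmetic. For instance, with 90
// marked old regions and the default G1MixedGCCountTarget of 8, the minimum
// is ceil(90 / 8) = 12 old regions per mixed GC, so the candidates are fully
// worked through in at most 8 mixed collections.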
1909 
1910 
1911 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
1912   double young_start_time_sec = os::elapsedTime();
1913 
1914   YoungList* young_list = _g1->young_list();
1915   finalize_incremental_cset_building();
1916 
1917   guarantee(target_pause_time_ms > 0.0,
1918             "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
1919   guarantee(_collection_set == NULL, "Precondition");
1920 
1921   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
1922   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
1923 
1924   log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
1925                             _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
1926 
1927   collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
1928 
1929   if (collector_state()->last_gc_was_young()) {
1930     _trace_young_gen_time_data.increment_young_collection_count();
1931   } else {
1932     _trace_young_gen_time_data.increment_mixed_collection_count();
1933   }
1934 
 1935   // The young list is laid out with the survivor regions from the
 1936   // previous pause appended to the RHS of the young list, i.e.
1937   //   [Newly Young Regions ++ Survivors from last pause].
1938 
1939   uint survivor_region_length = young_list->survivor_length();
1940   uint eden_region_length = young_list->eden_length();
1941   init_cset_region_lengths(eden_region_length, survivor_region_length);
1942 
1943   HeapRegion* hr = young_list->first_survivor_region();
1944   while (hr != NULL) {
1945     assert(hr->is_survivor(), "badly formed young list");
1946     // There is a convention that all the young regions in the CSet
1947     // are tagged as "eden", so we do this for the survivors here. We
1948     // use the special set_eden_pre_gc() as it doesn't check that the
1949     // region is free (which is not the case here).
1950     hr->set_eden_pre_gc();
1951     hr = hr->get_next_young_region();
1952   }
1953 
1954   // Clear the fields that point to the survivor list - they are all young now.
1955   young_list->clear_survivors();
1956 
1957   _collection_set = _inc_cset_head;
1958   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
1959   time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
1960 
1961   log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
1962                             eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms);
1963 
1964   // The number of recorded young regions is the incremental
1965   // collection set's current size
1966   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
1967 
1968   double young_end_time_sec = os::elapsedTime();
1969   phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
1970 
1971   return time_remaining_ms;
1972 }
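// The value returned above (the target pause time minus the predicted base
// cost and the predicted young-region cost) is the budget that
// finalize_old_cset_part() below may spend on adding old regions.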
1973 
1974 void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
1975   double non_young_start_time_sec = os::elapsedTime();
1976   double predicted_old_time_ms = 0.0;
1977 
1978 
1979   if (!collector_state()->gcs_are_young()) {
1980     CollectionSetChooser* cset_chooser = _collectionSetChooser;
1981     cset_chooser->verify();
1982     const uint min_old_cset_length = calc_min_old_cset_length();
1983     const uint max_old_cset_length = calc_max_old_cset_length();
1984 
1985     uint expensive_region_num = 0;
1986     bool check_time_remaining = adaptive_young_list_length();
1987 
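    // The selection loop below stops at the first of three conditions: the
    // old CSet has reached max_old_cset_length, the space still reclaimable
    // has dropped to G1HeapWastePercent or below, or (with adaptive young
    // list sizing) the pause-time budget has run out after at least
    // min_old_cset_length regions were added.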
1988     HeapRegion* hr = cset_chooser->peek();
1989     while (hr != NULL) {
1990       if (old_cset_region_length() >= max_old_cset_length) {
1991         // Added maximum number of old regions to the CSet.
1992         log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
1993                                   old_cset_region_length(), max_old_cset_length);
1994         break;
1995       }
1996 
1997 
1998       // Stop adding regions if the remaining reclaimable space is
1999       // not above G1HeapWastePercent.
2000       size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
2001       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2002       double threshold = (double) G1HeapWastePercent;
2003       if (reclaimable_perc <= threshold) {
2004         // We've added enough old regions that the amount of uncollected
2005         // reclaimable space is at or below the waste threshold. Stop
2006         // adding old regions to the CSet.
2007         log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
2008                                   "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
2009                                   old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
2010         break;
2011       }
2012 
2013       double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
2014       if (check_time_remaining) {
2015         if (predicted_time_ms > time_remaining_ms) {
2016           // Too expensive for the current CSet.
2017 
2018           if (old_cset_region_length() >= min_old_cset_length) {
2019             // We have added the minimum number of old regions to the CSet,
2020             // we are done with this CSet.
2021             log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
2022                                       "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
2023                                       predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length);
2024             break;
2025           }
2026 
2027           // We'll add it anyway given that we haven't reached the
2028           // minimum number of old regions.
2029           expensive_region_num += 1;
2030         }
2031       } else {
2032         if (old_cset_region_length() >= min_old_cset_length) {
2033           // In the non-auto-tuning case, we'll finish adding regions
2034           // to the CSet if we reach the minimum.
2035 
2036           log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
2037                                     old_cset_region_length(), min_old_cset_length);
2038           break;
2039         }
2040       }
2041 
2042       // We will add this region to the CSet.
2043       time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
2044       predicted_old_time_ms += predicted_time_ms;
2045       cset_chooser->pop(); // already have region via peek()
2046       _g1->old_set_remove(hr);
2047       add_old_region_to_cset(hr);
2048 
2049       hr = cset_chooser->peek();
2050     }
2051     if (hr == NULL) {
2052       log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
2053     }
2054 
2055     if (expensive_region_num > 0) {
2056       // We print the information once here at the end, predicated on
2057       // whether we added any apparently expensive regions or not, to
2058       // avoid generating output per region.
 2059       log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min). "
2060                                 "old %u regions, expensive: %u regions, min %u regions, remaining time: %1.2fms",
2061                                 old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
2062     }
2063 
2064     cset_chooser->verify();
2065   }
2066 
2067   stop_incremental_cset_building();
2068 
 2069   log_debug(gc, ergo, cset)("Finish choosing CSet. old %u regions, predicted old region time: %1.2fms, time remaining: %1.2fms",
2070                             old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
2071 
2072   double non_young_end_time_sec = os::elapsedTime();
2073   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2074 }
2075 
2076 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
 2077   if (TraceYoungGenTime) {
2078     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2079   }
2080 }
2081 
2082 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
 2083   if (TraceYoungGenTime) {
2084     _all_yield_times_ms.add(yield_time_ms);
2085   }
2086 }
2087 
2088 void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
 2089   if (TraceYoungGenTime) {
2090     _total.add(pause_time_ms);
2109     _parallel_other.add(parallel_other_time);
2110     _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2111   }
2112 }
2113 
2114 void TraceYoungGenTimeData::increment_young_collection_count() {
 2115   if (TraceYoungGenTime) {
2116     ++_young_pause_num;
2117   }
2118 }
2119 
2120 void TraceYoungGenTimeData::increment_mixed_collection_count() {
 2121   if (TraceYoungGenTime) {
2122     ++_mixed_pause_num;
2123   }
2124 }
2125 
2126 void TraceYoungGenTimeData::print_summary(const char* str,
2127                                           const NumberSeq* seq) const {
2128   double sum = seq->sum();
2129   tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2130                 str, sum / 1000.0, seq->avg());
2131 }
2132 
2133 void TraceYoungGenTimeData::print_summary_sd(const char* str,
2134                                              const NumberSeq* seq) const {
2135   print_summary(str, seq);
2136   tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2137                 "(num", seq->num(), seq->sd(), seq->maximum());
2138 }
2139 
2140 void TraceYoungGenTimeData::print() const {
2141   if (!TraceYoungGenTime) {
2142     return;
2143   }
2144 
2145   tty->print_cr("ALL PAUSES");
2146   print_summary_sd("   Total", &_total);
2147   tty->cr();
2148   tty->cr();
2149   tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
2150   tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
2151   tty->cr();
2152 
2153   tty->print_cr("EVACUATION PAUSES");
2154 
2155   if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2156     tty->print_cr("none");
2157   } else {
2158     print_summary_sd("   Evacuation Pauses", &_total);
2159     print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
2160     print_summary("      Parallel Time", &_parallel);
2161     print_summary("         Ext Root Scanning", &_ext_root_scan);
2162     print_summary("         SATB Filtering", &_satb_filtering);
2163     print_summary("         Update RS", &_update_rs);
2164     print_summary("         Scan RS", &_scan_rs);
2165     print_summary("         Object Copy", &_obj_copy);
2166     print_summary("         Termination", &_termination);
2167     print_summary("         Parallel Other", &_parallel_other);
2168     print_summary("      Clear CT", &_clear_ct);
2169     print_summary("      Other", &_other);
2170   }
2171   tty->cr();
2172 
2173   tty->print_cr("MISC");
2174   print_summary_sd("   Stop World", &_all_stop_world_times_ms);
2175   print_summary_sd("   Yields", &_all_yield_times_ms);
2176 }
2177 
2178 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2179   if (TraceOldGenTime) {
2180     _all_full_gc_times.add(full_gc_time_ms);
2181   }
2182 }
2183 
2184 void TraceOldGenTimeData::print() const {
2185   if (!TraceOldGenTime) {
2186     return;
2187   }
2188 
2189   if (_all_full_gc_times.num() > 0) {
2190     tty->print("\n%4d full_gcs: total time = %8.2f s",
2191       _all_full_gc_times.num(),
2192       _all_full_gc_times.sum() / 1000.0);
2193     tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2194     tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
2195       _all_full_gc_times.sd(),
2196       _all_full_gc_times.maximum());
2197   }
2198 }