src/share/vm/gc/g1/g1CollectorPolicy.cpp


  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/concurrentG1Refine.hpp"
  27 #include "gc/g1/concurrentMark.hpp"
  28 #include "gc/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorPolicy.hpp"
  31 #include "gc/g1/g1IHOPControl.hpp"
  32 #include "gc/g1/g1ErgoVerbose.hpp"
  33 #include "gc/g1/g1GCPhaseTimes.hpp"
  34 #include "gc/g1/g1Log.hpp"
  35 #include "gc/g1/heapRegion.inline.hpp"
  36 #include "gc/g1/heapRegionRemSet.hpp"
  37 #include "gc/shared/gcPolicyCounters.hpp"
  38 #include "runtime/arguments.hpp"
  39 #include "runtime/java.hpp"
  40 #include "runtime/mutexLocker.hpp"
  41 #include "utilities/debug.hpp"
  42 #include "utilities/pair.hpp"
  43 
  44 // Different defaults for different numbers of GC threads
  45 // They were chosen by running GCOld and SPECjbb on debris with different
  46 //   numbers of GC threads and choosing them based on the results
  47 
  48 // all the same
  49 static double rs_length_diff_defaults[] = {
  50   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
  51 };
  52 
  53 static double cost_per_card_ms_defaults[] = {
  54   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015


 104   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 105   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 106   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 107   _non_young_other_cost_per_region_ms_seq(
 108                                          new TruncatedSeq(TruncatedSeqLength)),
 109 
 110   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 111   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 112 
 113   _pause_time_target_ms((double) MaxGCPauseMillis),
 114 
 115   _recent_prev_end_times_for_all_gcs_sec(
 116                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 117 
 118   _recent_avg_pause_time_ratio(0.0),
 119   _rs_lengths_prediction(0),
 120   _max_survivor_regions(0),
 121 
 122   _eden_used_bytes_before_gc(0),
 123   _survivor_used_bytes_before_gc(0),


 124   _heap_used_bytes_before_gc(0),
 125   _metaspace_used_bytes_before_gc(0),
 126   _eden_capacity_bytes_before_gc(0),
 127   _heap_capacity_bytes_before_gc(0),
 128 
 129   _eden_cset_region_length(0),
 130   _survivor_cset_region_length(0),
 131   _old_cset_region_length(0),
 132 
 133   _collection_set(NULL),
 134   _collection_set_bytes_used_before(0),
 135 
 136   // Incremental CSet attributes
 137   _inc_cset_build_state(Inactive),
 138   _inc_cset_head(NULL),
 139   _inc_cset_tail(NULL),
 140   _inc_cset_bytes_used_before(0),
 141   _inc_cset_max_finger(NULL),
 142   _inc_cset_recorded_rs_lengths(0),
 143   _inc_cset_recorded_rs_lengths_diffs(0),


 160   // indirectly use it through this object passed to their constructor.
 161   _short_lived_surv_rate_group =
 162     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
 163   _survivor_surv_rate_group =
 164     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
 165 
 166   // Set up the region size and associated fields. Given that the
 167   // policy is created before the heap, we have to set this up here,
 168   // so it's done as soon as possible.
 169 
 170   // It would have been natural to pass initial_heap_byte_size() and
 171   // max_heap_byte_size() to setup_heap_region_size() but those have
 172   // not been set up at this point since they should be aligned with
 173   // the region size. So, there is a circular dependency here. We base
 174   // the region size on the heap size, but the heap size should be
 175   // aligned with the region size. To get around this we use the
 176   // unaligned values for the heap.
 177   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
 178   HeapRegionRemSet::setup_remset_size();
 179 
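A minimal standalone sketch of the derivation the comment above describes, using illustrative assumptions (1M..32M region bounds, ~2048-region target); the authoritative logic is HeapRegion::setup_heap_region_size():

    #include <cstddef>

    // Sketch only: these constants are assumptions standing in for HeapRegionBounds.
    static const size_t kMinRegionSize     = 1024 * 1024;      // assumed 1M floor
    static const size_t kMaxRegionSize     = 32 * 1024 * 1024; // assumed 32M cap
    static const size_t kTargetRegionCount = 2048;             // assumed target

    static size_t sketch_region_size(size_t initial_heap, size_t max_heap) {
      // Base the region size on the average of the unaligned heap bounds.
      size_t average = (initial_heap + max_heap) / 2;
      size_t size = average / kTargetRegionCount;
      if (size < kMinRegionSize) size = kMinRegionSize;
      // Round down to a power of two, then clamp to the allowed range.
      size_t pow2 = kMinRegionSize;
      while (pow2 * 2 <= size) pow2 *= 2;
      return (pow2 > kMaxRegionSize) ? kMaxRegionSize : pow2;
    }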
 180   G1ErgoVerbose::initialize();
 181   if (PrintAdaptiveSizePolicy) {
 182     // Currently, we only use a single switch for all the heuristics.
 183     G1ErgoVerbose::set_enabled(true);
 184     // Given that we don't currently have a verboseness level
 185     // parameter, we'll hardcode this to high. This can be easily
 186     // changed in the future.
 187     G1ErgoVerbose::set_level(ErgoHigh);
 188   } else {
 189     G1ErgoVerbose::set_enabled(false);
 190   }
 191 
 192   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
 193   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 194 
 195   _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
 196 
 197   int index = MIN2(_parallel_gc_threads - 1, 7);
 198 
 199   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
 200   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
 201   _cost_scan_hcc_seq->add(0.0);
 202   _young_cards_per_entry_ratio_seq->add(
 203                                   young_cards_per_entry_ratio_defaults[index]);
 204   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
 205   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
 206   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
 207   _young_other_cost_per_region_ms_seq->add(
 208                                young_other_cost_per_region_ms_defaults[index]);
 209   _non_young_other_cost_per_region_ms_seq->add(
 210                            non_young_other_cost_per_region_ms_defaults[index]);
 211 
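The seeding above selects one column of the 8-entry default arrays per GC thread count; a minimal illustration of the clamp:

    #include <algorithm>
    #include <cstdio>

    int main() {
      const double cost_per_card_ms[] = {
        0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
      };
      const int thread_counts[] = {1, 2, 8, 16};
      for (int threads : thread_counts) {
        // Same clamp as MIN2(_parallel_gc_threads - 1, 7) above.
        int index = std::min(threads - 1, 7);
        std::printf("%2d GC threads -> cost_per_card_ms[%d] = %.4f\n",
                    threads, index, cost_per_card_ms[index]);
      }
      return 0;
    }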


 773   HeapRegion* head = _g1->young_list()->first_region();
 774   return
 775     verify_young_ages(head, _short_lived_surv_rate_group);
 776   // also call verify_young_ages on any additional surv rate groups
 777 }
 778 
 779 bool
 780 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
 781                                      SurvRateGroup *surv_rate_group) {
  782   guarantee(surv_rate_group != NULL, "pre-condition");
 783 
 784   const char* name = surv_rate_group->name();
 785   bool ret = true;
 786   int prev_age = -1;
 787 
 788   for (HeapRegion* curr = head;
 789        curr != NULL;
 790        curr = curr->get_next_young_region()) {
 791     SurvRateGroup* group = curr->surv_rate_group();
 792     if (group == NULL && !curr->is_survivor()) {
 793       gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
 794       ret = false;
 795     }
 796 
 797     if (surv_rate_group == group) {
 798       int age = curr->age_in_surv_rate_group();
 799 
 800       if (age < 0) {
 801         gclog_or_tty->print_cr("## %s: encountered negative age", name);
 802         ret = false;
 803       }
 804 
 805       if (age <= prev_age) {
 806         gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
 807                                "(%d, %d)", name, age, prev_age);
 808         ret = false;
 809       }
 810       prev_age = age;
 811     }
 812   }
 813 
 814   return ret;
 815 }
 816 #endif // PRODUCT
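Stripped of the heap-region walking, the invariant verify_young_ages() checks reduces to the following (illustrative helper, not HotSpot code):

    #include <vector>

    // Within one surv rate group, ages along the young list must be
    // non-negative and strictly increasing.
    bool sketch_ages_strictly_increasing(const std::vector<int>& ages) {
      int prev_age = -1;
      for (int age : ages) {
        if (age < 0 || age <= prev_age) {
          return false;
        }
        prev_age = age;
      }
      return true;
    }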
 817 
 818 void G1CollectorPolicy::record_full_collection_start() {
 819   _full_collection_start_sec = os::elapsedTime();
 820   record_heap_size_info_at_start(true /* full */);
 821   // Release the future to-space so that it is available for compaction into.
 822   collector_state()->set_full_collection(true);
 823 }
 824 
 825 void G1CollectorPolicy::record_full_collection_end() {
 826   // Consider this like a collection pause for the purposes of allocation
 827   // since last pause.


 966 
 967 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
 968   return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
 969 }
 970 
 971 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
 972   return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
 973 }
 974 
 975 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 976   if (about_to_start_mixed_phase()) {
 977     return false;
 978   }
 979 
 980   size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
 981 
 982   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 983   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 984   size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
 985 

 986   if (marking_request_bytes > marking_initiating_used_threshold) {
 987     if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
 988       ergo_verbose5(ErgoConcCycles,
 989         "request concurrent cycle initiation",
 990         ergo_format_reason("occupancy higher than threshold")
 991         ergo_format_byte("occupancy")
 992         ergo_format_byte("allocation request")
 993         ergo_format_byte_perc("threshold")
 994         ergo_format_str("source"),
 995         cur_used_bytes,
 996         alloc_byte_size,
 997         marking_initiating_used_threshold,
 998         (double) marking_initiating_used_threshold / _g1->capacity() * 100,
 999         source);
1000       return true;
1001     } else {
1002       ergo_verbose5(ErgoConcCycles,
1003         "do not request concurrent cycle initiation",
1004         ergo_format_reason("still doing mixed collections")
1005         ergo_format_byte("occupancy")
1006         ergo_format_byte("allocation request")
1007         ergo_format_byte_perc("threshold")
1008         ergo_format_str("source"),
1009         cur_used_bytes,
1010         alloc_byte_size,
1011         marking_initiating_used_threshold,
1012         (double) InitiatingHeapOccupancyPercent,
1013         source);
1014     }
1015   }
1016 
1017   return false;
1018 }
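Condensed, the decision above comes down to one predicate: request marking only when the projected occupancy (current non-young bytes plus the pending allocation) crosses the IHOP threshold while still in young-only mode. A sketch under those assumptions:

    #include <cstddef>

    bool sketch_need_to_start_conc_mark(size_t cur_used_bytes,
                                        size_t alloc_byte_size,
                                        size_t threshold_bytes,
                                        bool gcs_are_young,
                                        bool last_young_gc) {
      size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
      return marking_request_bytes > threshold_bytes
          && gcs_are_young
          && !last_young_gc;   // not already heading into the mixed phase
    }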
1019 
1020 // Anything below this threshold is considered to be zero
1021 #define MIN_TIMER_GRANULARITY 0.0000001
1022 
1023 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
1024   double end_time_sec = os::elapsedTime();
1025 
1026   size_t cur_used_bytes = _g1->used();
1027   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1028   bool last_pause_included_initial_mark = false;
1029   bool update_stats = !_g1->evacuation_failed();
1030 
1031 #ifndef PRODUCT
1032   if (G1YoungSurvRateVerbose) {
1033     gclog_or_tty->cr();
1034     _short_lived_surv_rate_group->print();
1035     // do that for any other surv rate groups too
1036   }
1037 #endif // PRODUCT
1038 
1039   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
1040 
1041   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
1042   if (last_pause_included_initial_mark) {
1043     record_concurrent_mark_init_end(0.0);
1044   } else {
1045     maybe_start_marking();
1046   }
1047 
1048   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
1049   if (app_time_ms < MIN_TIMER_GRANULARITY) {
1050     // This usually happens due to the timer not having the required
1051     // granularity. Some Linuxes are the usual culprits.
1052     // We'll just set it to something (arbitrarily) small.
1053     app_time_ms = 1.0;
1054   }
1055 
1056   if (update_stats) {
1057     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());


1204   // IHOP control wants to know the expected young gen length if it were not
1205   // restrained by the heap reserve. Using the actual length would make the
1206   // prediction too small and thus limit the young gen every time we get to the
1207   // predicted target occupancy.
1208   size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
1209   update_rs_lengths_prediction();
1210 
1211   update_ihop_prediction(app_time_ms / 1000.0,
1212                          _bytes_allocated_in_old_since_last_gc,
1213                          last_unrestrained_young_length * HeapRegion::GrainBytes);
1214   _bytes_allocated_in_old_since_last_gc = 0;
1215 
1216   _ihop_control->send_trace_event(_g1->gc_tracer_stw());
1217 
1218   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1219   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1220 
1221   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1222 
1223   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1224     ergo_verbose2(ErgoTiming,
1225                   "adjust concurrent refinement thresholds",
1226                   ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
1227                   ergo_format_ms("Update RS time goal")
1228                   ergo_format_ms("Scan HCC time"),
1229                   update_rs_time_goal_ms,
1230                   scan_hcc_time_ms);
1231 
1232     update_rs_time_goal_ms = 0;
1233   } else {
1234     update_rs_time_goal_ms -= scan_hcc_time_ms;
1235   }
1236   adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1237                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1238                                update_rs_time_goal_ms);
1239 
1240   cset_chooser()->verify();
1241 }
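The Update RS goal computation above, isolated: the goal is G1RSetUpdatingPauseTimePercent of the MMU pause budget, with the HCC scan time carved out first and the result floored at zero (a sketch, not the HotSpot code):

    // max_gc_time_sec comes from the MMU tracker, in seconds.
    double sketch_update_rs_goal_ms(double max_gc_time_sec,
                                    double rset_updating_pct,
                                    double scan_hcc_time_ms) {
      double goal_ms = max_gc_time_sec * 1000.0 * rset_updating_pct / 100.0;
      return (goal_ms < scan_hcc_time_ms) ? 0.0 : goal_ms - scan_hcc_time_ms;
    }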
1242 
1243 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
1244   if (G1UseAdaptiveIHOP) {
1245     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
1246                                      G1CollectedHeap::heap()->max_capacity(),
1247                                      &_predictor,
1248                                      G1ReservePercent,
1249                                      G1HeapWastePercent);
1250   } else {


1288 
1289   if (report) {
1290     report_ihop_statistics();
1291   }
1292 }
1293 
1294 void G1CollectorPolicy::report_ihop_statistics() {
1295   _ihop_control->print();
1296 }
1297 
1298 #define EXT_SIZE_FORMAT "%.1f%s"
1299 #define EXT_SIZE_PARAMS(bytes)                                  \
1300   byte_size_in_proper_unit((double)(bytes)),                    \
1301   proper_unit_for_byte_size((bytes))
1302 
1303 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1304   YoungList* young_list = _g1->young_list();
1305   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1306   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1307   _heap_capacity_bytes_before_gc = _g1->capacity();


1308   _heap_used_bytes_before_gc = _g1->used();
1309 
1310   _eden_capacity_bytes_before_gc =
1311          (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1312 
1313   if (full) {
1314     _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1315   }
1316 }
1317 
1318 void G1CollectorPolicy::print_heap_transition(size_t bytes_before) const {
1319   size_t bytes_after = _g1->used();
1320   size_t capacity = _g1->capacity();
1321 
1322   gclog_or_tty->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
1323       byte_size_in_proper_unit(bytes_before),
1324       proper_unit_for_byte_size(bytes_before),
1325       byte_size_in_proper_unit(bytes_after),
1326       proper_unit_for_byte_size(bytes_after),
1327       byte_size_in_proper_unit(capacity),
1328       proper_unit_for_byte_size(capacity));
1329 }
1330 
1331 void G1CollectorPolicy::print_heap_transition() const {
1332   print_heap_transition(_heap_used_bytes_before_gc);
1333 }
1334 
1335 void G1CollectorPolicy::print_detailed_heap_transition(bool full) const {
1336   YoungList* young_list = _g1->young_list();
1337 
1338   size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
1339   size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
1340   size_t heap_used_bytes_after_gc = _g1->used();


1341 
1342   size_t heap_capacity_bytes_after_gc = _g1->capacity();
1343   size_t eden_capacity_bytes_after_gc =
1344     (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;

1345 
1346   gclog_or_tty->print(
1347     "   [Eden: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->" EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ") "
1348     "Survivors: " EXT_SIZE_FORMAT "->" EXT_SIZE_FORMAT " "
1349     "Heap: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->"
1350     EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")]",
1351     EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
1352     EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
1353     EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
1354     EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
1355     EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
1356     EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
1357     EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
1358     EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
1359     EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
1360     EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));
1361 
1362   if (full) {
1363     MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
1364   }
1365 
1366   gclog_or_tty->cr();
1367 }
1368 
1369 void G1CollectorPolicy::print_phases(double pause_time_sec) {
1370   phase_times()->print(pause_time_sec);
1371 }
1372 
1373 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1374                                                      double update_rs_processed_buffers,
1375                                                      double goal_ms) {
1376   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1377   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1378 
1379   if (G1UseAdaptiveConcRefinement) {
1380     const int k_gy = 3, k_gr = 6;
1381     const double inc_k = 1.1, dec_k = 0.9;
1382 
1383     int g = cg1r->green_zone();
1384     if (update_rs_time > goal_ms) {
1385       g = (int)(g * dec_k);  // Can become 0, that's OK. That would mean mutator-only processing.
1386     } else {


1601 
1602 size_t G1CollectorPolicy::expansion_amount() const {
1603   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1604   double threshold = _gc_overhead_perc;
1605   if (recent_gc_overhead > threshold) {
1606     // We will double the existing space, or take
1607     // G1ExpandByPercentOfAvailable % of the available expansion
1608     // space, whichever is smaller, bounded below by a minimum
1609   // expansion (unless that's all that's left).
1610     const size_t min_expand_bytes = 1*M;
1611     size_t reserved_bytes = _g1->max_capacity();
1612     size_t committed_bytes = _g1->capacity();
1613     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1614     size_t expand_bytes;
1615     size_t expand_bytes_via_pct =
1616       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1617     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1618     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1619     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1620 
1621     ergo_verbose5(ErgoHeapSizing,
1622                   "attempt heap expansion",
1623                   ergo_format_reason("recent GC overhead higher than "
1624                                      "threshold after GC")
1625                   ergo_format_perc("recent GC overhead")
1626                   ergo_format_perc("threshold")
1627                   ergo_format_byte("uncommitted")
1628                   ergo_format_byte_perc("calculated expansion amount"),
1629                   recent_gc_overhead, threshold,
1630                   uncommitted_bytes,
1631                   expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
1632 
1633     return expand_bytes;
1634   } else {
1635     return 0;
1636   }
1637 }
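A worked instance of the clamp chain above, as a standalone sketch: with 8G reserved, 2G committed, and G1ExpandByPercentOfAvailable = 20, the uncommitted space is 6G, the percentage-based amount is 1.2G, and all three clamps leave it at 1.2G:

    #include <algorithm>
    #include <cstddef>

    size_t sketch_expansion_amount(size_t reserved_bytes,
                                   size_t committed_bytes,
                                   size_t expand_pct) {
      const size_t min_expand_bytes = 1024 * 1024;           // 1M floor
      size_t uncommitted = reserved_bytes - committed_bytes;
      size_t via_pct = uncommitted * expand_pct / 100;
      size_t expand = std::min(via_pct, committed_bytes);    // at most a doubling
      expand = std::max(expand, min_expand_bytes);           // at least the minimum
      return std::min(expand, uncommitted);                  // at most what is left
    }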
1638 
1639 void G1CollectorPolicy::print_tracing_info() const {
1640   _trace_young_gen_time_data.print();
1641   _trace_old_gen_time_data.print();
1642 }
1643 
1644 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1645 #ifndef PRODUCT
1646   _short_lived_surv_rate_group->print_surv_rate_summary();
1647   // add this call for any other surv rate groups
1648 #endif // PRODUCT
1649 }
1650 
1651 bool G1CollectorPolicy::is_young_list_full() const {


1676 }
1677 
1678 // Calculates survivor space parameters.
1679 void G1CollectorPolicy::update_survivors_policy() {
1680   double max_survivor_regions_d =
1681                  (double) _young_list_target_length / (double) SurvivorRatio;
1682   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1683   // smaller than 1.0) we'll get 1.
1684   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1685 
1686   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1687         HeapRegion::GrainWords * _max_survivor_regions, counters());
1688 }
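The ceiling in update_survivors_policy() matters for small young targets; e.g. a target of 50 regions with SurvivorRatio = 8 gives 50 / 8 = 6.25, which ceil() rounds up to a cap of 7 survivor regions. An illustrative helper:

    #include <cmath>

    unsigned int sketch_max_survivor_regions(unsigned int young_target_length,
                                             unsigned int survivor_ratio) {
      // Ceiling so that any fractional demand still yields one region.
      return (unsigned int) ceil((double) young_target_length /
                                 (double) survivor_ratio);
    }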
1689 
1690 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
1691   // We actually check whether we are marking here and not if we are in a
1692   // reclamation phase. This means that we will schedule a concurrent mark
1693   // even while we are still in the process of reclaiming memory.
1694   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1695   if (!during_cycle) {
1696     ergo_verbose1(ErgoConcCycles,
1697                   "request concurrent cycle initiation",
1698                   ergo_format_reason("requested by GC cause")
1699                   ergo_format_str("GC cause"),
1700                   GCCause::to_string(gc_cause));
1701     collector_state()->set_initiate_conc_mark_if_possible(true);
1702     return true;
1703   } else {
1704     ergo_verbose1(ErgoConcCycles,
1705                   "do not request concurrent cycle initiation",
1706                   ergo_format_reason("concurrent cycle already in progress")
1707                   ergo_format_str("GC cause"),
1708                   GCCause::to_string(gc_cause));
1709     return false;
1710   }
1711 }
1712 
1713 void G1CollectorPolicy::decide_on_conc_mark_initiation() {
1714   // We are about to decide on whether this pause will be an
1715   // initial-mark pause.
1716 
1717   // First, collector_state()->during_initial_mark_pause() should not be already set. We
1718   // will set it here if we have to. However, it should be cleared by
1719   // the end of the pause (it's only set for the duration of an
1720   // initial-mark pause).
1721   assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1722 
1723   if (collector_state()->initiate_conc_mark_if_possible()) {
1724     // We had noticed on a previous pause that the heap occupancy has
1725     // gone over the initiating threshold and we should start a
1726     // concurrent marking cycle. So we might initiate one.
1727 
1728     if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
1729       // Initiate a new initial mark only if there is no marking or reclamation going
1730       // on.
1731 
1732       collector_state()->set_during_initial_mark_pause(true);
1733       // And we can now clear initiate_conc_mark_if_possible() as
1734       // we've already acted on it.
1735       collector_state()->set_initiate_conc_mark_if_possible(false);
1736 
1737       ergo_verbose0(ErgoConcCycles,
1738                   "initiate concurrent cycle",
1739                   ergo_format_reason("concurrent cycle initiation requested"));
1740     } else {
1741       // The concurrent marking thread is still finishing up the
1742       // previous cycle. If we start one right now the two cycles
1743       // overlap. In particular, the concurrent marking thread might
1744       // be in the process of clearing the next marking bitmap (which
1745       // we will use for the next cycle if we start one). Starting a
1746       // cycle now will be bad given that parts of the marking
1747       // information might get cleared by the marking thread. And we
1748       // cannot wait for the marking thread to finish the cycle as it
1749       // periodically yields while clearing the next marking bitmap
1750       // and, if it's in a yield point, it's waiting for us to
1751       // finish. So, at this point we will not start a cycle and we'll
1752       // let the concurrent marking thread complete the last one.
1753       ergo_verbose0(ErgoConcCycles,
1754                     "do not initiate concurrent cycle",
1755                     ergo_format_reason("concurrent cycle already in progress"));
1756     }
1757   }
1758 }
1759 
1760 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1761   G1CollectedHeap* _g1h;
1762   CSetChooserParUpdater _cset_updater;
1763 
1764 public:
1765   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1766                            uint chunk_size) :
1767     _g1h(G1CollectedHeap::heap()),
1768     _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1769 
1770   bool doHeapRegion(HeapRegion* r) {
1771     // Do we have any marking information for this region?
1772     if (r->is_marked()) {
1773       // We will skip any region that's currently used as an old GC
1774       // alloc region (we should not consider those for collection
1775       // before we fill them up).


2081       _initial_mark_to_mixed.add_pause(end - start);
2082       break;
2083     case InitialMarkGC:
2084       _initial_mark_to_mixed.record_initial_mark_end(end);
2085       break;
2086     case MixedGC:
2087       _initial_mark_to_mixed.record_mixed_gc_start(start);
2088       break;
2089     default:
2090       ShouldNotReachHere();
2091   }
2092 }
2093 
2094 void G1CollectorPolicy::abort_time_to_mixed_tracking() {
2095   _initial_mark_to_mixed.reset();
2096 }
2097 
2098 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
2099                                                 const char* false_action_str) const {
2100   if (cset_chooser()->is_empty()) {
2101     ergo_verbose0(ErgoMixedGCs,
2102                   false_action_str,
2103                   ergo_format_reason("candidate old regions not available"));
2104     return false;
2105   }
2106 
2107   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
2108   size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
2109   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2110   double threshold = (double) G1HeapWastePercent;
2111   if (reclaimable_perc <= threshold) {
2112     ergo_verbose4(ErgoMixedGCs,
2113               false_action_str,
2114               ergo_format_reason("reclaimable percentage not over threshold")
2115               ergo_format_region("candidate old regions")
2116               ergo_format_byte_perc("reclaimable")
2117               ergo_format_perc("threshold"),
2118               cset_chooser()->remaining_regions(),
2119               reclaimable_bytes,
2120               reclaimable_perc, threshold);
2121     return false;
2122   }
2123 
2124   ergo_verbose4(ErgoMixedGCs,
2125                 true_action_str,
2126                 ergo_format_reason("candidate old regions available")
2127                 ergo_format_region("candidate old regions")
2128                 ergo_format_byte_perc("reclaimable")
2129                 ergo_format_perc("threshold"),
2130                 cset_chooser()->remaining_regions(),
2131                 reclaimable_bytes,
2132                 reclaimable_perc, threshold);
2133   return true;
2134 }
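The gate above in one function, assuming reclaimable_bytes_perc() is reclaimable bytes as a percentage of heap capacity (a sketch, not the HotSpot code):

    #include <cstddef>

    // Mixed GCs continue only while the reclaimable share of the heap
    // stays above G1HeapWastePercent.
    bool sketch_worth_another_mixed_gc(size_t reclaimable_bytes,
                                       size_t heap_capacity_bytes,
                                       double waste_threshold_perc) {
      double reclaimable_perc =
          (double) reclaimable_bytes * 100.0 / (double) heap_capacity_bytes;
      return reclaimable_perc > waste_threshold_perc;
    }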
2135 
2136 uint G1CollectorPolicy::calc_min_old_cset_length() const {
2137   // The min old CSet region bound is based on the maximum desired
2138   // number of mixed GCs after a cycle. I.e., even if some old regions
2139   // look expensive, we should add them to the CSet anyway to make
2140   // sure we go through the available old regions in no more than the
2141   // maximum desired number of mixed GCs.
2142   //
2143   // The calculation is based on the number of marked regions we added
2144   // to the CSet chooser in the first place, not how many remain, so
2145   // that the result is the same during all mixed GCs that follow a cycle.
2146 
2147   const size_t region_num = (size_t) cset_chooser()->length();
2148   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
2149   size_t result = region_num / gc_num;
2150   // emulate ceiling
2151   if (result * gc_num < region_num) {
2152     result += 1;


2168   if (100 * result < region_num * perc) {
2169     result += 1;
2170   }
2171   return (uint) result;
2172 }
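Both ceiling emulations above follow the same integer idiom; in isolation:

    #include <cstddef>

    // ceil(region_num / gc_num) without floating point.
    size_t ceil_div(size_t region_num, size_t gc_num) {
      size_t result = region_num / gc_num;
      if (result * gc_num < region_num) {
        result += 1;   // round up on any remainder, e.g. ceil_div(10, 4) == 3
      }
      return result;
    }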
2173 
2174 
2175 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
2176   double young_start_time_sec = os::elapsedTime();
2177 
2178   YoungList* young_list = _g1->young_list();
2179   finalize_incremental_cset_building();
2180 
2181   guarantee(target_pause_time_ms > 0.0,
2182             "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
2183   guarantee(_collection_set == NULL, "Precondition");
2184 
2185   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2186   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
2187 
2188   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
2189                 "start choosing CSet",
2190                 ergo_format_size("_pending_cards")
2191                 ergo_format_ms("predicted base time")
2192                 ergo_format_ms("remaining time")
2193                 ergo_format_ms("target pause time"),
2194                 _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
2195 
2196   collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
2197 
2198   if (collector_state()->last_gc_was_young()) {
2199     _trace_young_gen_time_data.increment_young_collection_count();
2200   } else {
2201     _trace_young_gen_time_data.increment_mixed_collection_count();
2202   }
2203 
2204   // The young list is laid out with the survivor regions from the
2205   // previous pause appended to the RHS of the young list, i.e.
2206   //   [Newly Young Regions ++ Survivors from last pause].
2207 
2208   uint survivor_region_length = young_list->survivor_length();
2209   uint eden_region_length = young_list->eden_length();
2210   init_cset_region_lengths(eden_region_length, survivor_region_length);
2211 
2212   HeapRegion* hr = young_list->first_survivor_region();
2213   while (hr != NULL) {
2214     assert(hr->is_survivor(), "badly formed young list");
2215     // There is a convention that all the young regions in the CSet
2216     // are tagged as "eden", so we do this for the survivors here. We
2217     // use the special set_eden_pre_gc() as it doesn't check that the
2218     // region is free (which is not the case here).
2219     hr->set_eden_pre_gc();
2220     hr = hr->get_next_young_region();
2221   }
2222 
2223   // Clear the fields that point to the survivor list - they are all young now.
2224   young_list->clear_survivors();
2225 
2226   _collection_set = _inc_cset_head;
2227   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
2228   time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
2229 
2230   ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
2231                 "add young regions to CSet",
2232                 ergo_format_region("eden")
2233                 ergo_format_region("survivors")
2234                 ergo_format_ms("predicted young region time")
2235                 ergo_format_ms("target pause time"),
2236                 eden_region_length, survivor_region_length,
2237                 _inc_cset_predicted_elapsed_time_ms,
2238                 target_pause_time_ms);
2239 
2240   // The number of recorded young regions is the incremental
2241   // collection set's current size
2242   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
2243 
2244   double young_end_time_sec = os::elapsedTime();
2245   phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
2246 
2247   return time_remaining_ms;
2248 }
2249 
2250 void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
2251   double non_young_start_time_sec = os::elapsedTime();
2252   double predicted_old_time_ms = 0.0;
2253 
2254 
2255   if (!collector_state()->gcs_are_young()) {
2256     cset_chooser()->verify();
2257     const uint min_old_cset_length = calc_min_old_cset_length();
2258     const uint max_old_cset_length = calc_max_old_cset_length();
2259 
2260     uint expensive_region_num = 0;
2261     bool check_time_remaining = adaptive_young_list_length();
2262 
2263     HeapRegion* hr = cset_chooser()->peek();
2264     while (hr != NULL) {
2265       if (old_cset_region_length() >= max_old_cset_length) {
2266         // Added maximum number of old regions to the CSet.
2267         ergo_verbose2(ErgoCSetConstruction,
2268                       "finish adding old regions to CSet",
2269                       ergo_format_reason("old CSet region num reached max")
2270                       ergo_format_region("old")
2271                       ergo_format_region("max"),
2272                       old_cset_region_length(), max_old_cset_length);
2273         break;
2274       }
2275 
2276 
2277       // Stop adding regions if the remaining reclaimable space is
2278       // not above G1HeapWastePercent.
2279       size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
2280       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2281       double threshold = (double) G1HeapWastePercent;
2282       if (reclaimable_perc <= threshold) {
2283         // We've added enough old regions that the amount of uncollected
2284         // reclaimable space is at or below the waste threshold. Stop
2285         // adding old regions to the CSet.
2286         ergo_verbose5(ErgoCSetConstruction,
2287                       "finish adding old regions to CSet",
2288                       ergo_format_reason("reclaimable percentage not over threshold")
2289                       ergo_format_region("old")
2290                       ergo_format_region("max")
2291                       ergo_format_byte_perc("reclaimable")
2292                       ergo_format_perc("threshold"),
2293                       old_cset_region_length(),
2294                       max_old_cset_length,
2295                       reclaimable_bytes,
2296                       reclaimable_perc, threshold);
2297         break;
2298       }
2299 
2300       double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
2301       if (check_time_remaining) {
2302         if (predicted_time_ms > time_remaining_ms) {
2303           // Too expensive for the current CSet.
2304 
2305           if (old_cset_region_length() >= min_old_cset_length) {
2306             // We have added the minimum number of old regions to the CSet,
2307             // we are done with this CSet.
2308             ergo_verbose4(ErgoCSetConstruction,
2309                           "finish adding old regions to CSet",
2310                           ergo_format_reason("predicted time is too high")
2311                           ergo_format_ms("predicted time")
2312                           ergo_format_ms("remaining time")
2313                           ergo_format_region("old")
2314                           ergo_format_region("min"),
2315                           predicted_time_ms, time_remaining_ms,
2316                           old_cset_region_length(), min_old_cset_length);
2317             break;
2318           }
2319 
2320           // We'll add it anyway given that we haven't reached the
2321           // minimum number of old regions.
2322           expensive_region_num += 1;
2323         }
2324       } else {
2325         if (old_cset_region_length() >= min_old_cset_length) {
2326           // In the non-auto-tuning case, we'll finish adding regions
2327           // to the CSet if we reach the minimum.
2328           ergo_verbose2(ErgoCSetConstruction,
2329                         "finish adding old regions to CSet",
2330                         ergo_format_reason("old CSet region num reached min")
2331                         ergo_format_region("old")
2332                         ergo_format_region("min"),
2333                         old_cset_region_length(), min_old_cset_length);
2334           break;
2335         }
2336       }
2337 
2338       // We will add this region to the CSet.
2339       time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
2340       predicted_old_time_ms += predicted_time_ms;
2341       cset_chooser()->pop(); // already have region via peek()
2342       _g1->old_set_remove(hr);
2343       add_old_region_to_cset(hr);
2344 
2345       hr = cset_chooser()->peek();
2346     }
2347     if (hr == NULL) {
2348       ergo_verbose0(ErgoCSetConstruction,
2349                     "finish adding old regions to CSet",
2350                     ergo_format_reason("candidate old regions not available"));
2351     }
2352 
2353     if (expensive_region_num > 0) {
2354       // We print the information once here at the end, predicated on
2355       // whether we added any apparently expensive regions or not, to
2356       // avoid generating output per region.
2357       ergo_verbose4(ErgoCSetConstruction,
2358                     "added expensive regions to CSet",
2359                     ergo_format_reason("old CSet region num not reached min")
2360                     ergo_format_region("old")
2361                     ergo_format_region("expensive")
2362                     ergo_format_region("min")
2363                     ergo_format_ms("remaining time"),
2364                     old_cset_region_length(),
2365                     expensive_region_num,
2366                     min_old_cset_length,
2367                     time_remaining_ms);
2368     }
2369 
2370     cset_chooser()->verify();
2371   }
2372 
2373   stop_incremental_cset_building();
2374 
2375   ergo_verbose3(ErgoCSetConstruction,
2376                 "finish choosing CSet",
2377                 ergo_format_region("old")
2378                 ergo_format_ms("predicted old region time")
2379                 ergo_format_ms("time remaining"),
2380                 old_cset_region_length(),
2381                 predicted_old_time_ms, time_remaining_ms);
2382 
2383   double non_young_end_time_sec = os::elapsedTime();
2384   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2385 }
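For reference, the old-region selection loop above stops adding regions for one of three reasons, restated as a condensed (illustrative, non-HotSpot) helper:

    enum SketchStopReason { kKeepGoing, kMaxReached, kWasteThreshold,
                            kTimeBudget, kMinReached };

    SketchStopReason sketch_should_stop(unsigned old_len, unsigned min_len,
                                        unsigned max_len,
                                        double reclaimable_perc,
                                        double waste_threshold,
                                        double predicted_ms,
                                        double remaining_ms,
                                        bool adaptive) {
      if (old_len >= max_len) return kMaxReached;              // hard cap
      if (reclaimable_perc <= waste_threshold) return kWasteThreshold;
      if (adaptive) {
        // Over-budget regions still go in until the minimum is reached.
        if (predicted_ms > remaining_ms && old_len >= min_len) return kTimeBudget;
      } else if (old_len >= min_len) {
        return kMinReached;                                    // fixed-length mode
      }
      return kKeepGoing;
    }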
2386 
2387 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
2388   if (TraceYoungGenTime) {
2389     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2390   }
2391 }
2392 
2393 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
2394   if (TraceYoungGenTime) {
2395     _all_yield_times_ms.add(yield_time_ms);
2396   }
2397 }
2398 
2399 void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2400   if (TraceYoungGenTime) {
2401     _total.add(pause_time_ms);


2420     _parallel_other.add(parallel_other_time);
2421     _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2422   }
2423 }
2424 
2425 void TraceYoungGenTimeData::increment_young_collection_count() {
2426   if (TraceYoungGenTime) {
2427     ++_young_pause_num;
2428   }
2429 }
2430 
2431 void TraceYoungGenTimeData::increment_mixed_collection_count() {
2432   if (TraceYoungGenTime) {
2433     ++_mixed_pause_num;
2434   }
2435 }
2436 
2437 void TraceYoungGenTimeData::print_summary(const char* str,
2438                                           const NumberSeq* seq) const {
2439   double sum = seq->sum();
2440   gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2441                 str, sum / 1000.0, seq->avg());
2442 }
2443 
2444 void TraceYoungGenTimeData::print_summary_sd(const char* str,
2445                                              const NumberSeq* seq) const {
2446   print_summary(str, seq);
2447   gclog_or_tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2448                 "(num", seq->num(), seq->sd(), seq->maximum());
2449 }
2450 
2451 void TraceYoungGenTimeData::print() const {
2452   if (!TraceYoungGenTime) {
2453     return;
2454   }
2455 
2456   gclog_or_tty->print_cr("ALL PAUSES");
2457   print_summary_sd("   Total", &_total);
2458   gclog_or_tty->cr();
2459   gclog_or_tty->cr();
2460   gclog_or_tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
2461   gclog_or_tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
2462   gclog_or_tty->cr();
2463 
2464   gclog_or_tty->print_cr("EVACUATION PAUSES");
2465 
2466   if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2467     gclog_or_tty->print_cr("none");
2468   } else {
2469     print_summary_sd("   Evacuation Pauses", &_total);
2470     print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
2471     print_summary("      Parallel Time", &_parallel);
2472     print_summary("         Ext Root Scanning", &_ext_root_scan);
2473     print_summary("         SATB Filtering", &_satb_filtering);
2474     print_summary("         Update RS", &_update_rs);
2475     print_summary("         Scan RS", &_scan_rs);
2476     print_summary("         Object Copy", &_obj_copy);
2477     print_summary("         Termination", &_termination);
2478     print_summary("         Parallel Other", &_parallel_other);
2479     print_summary("      Clear CT", &_clear_ct);
2480     print_summary("      Other", &_other);
2481   }
2482   gclog_or_tty->cr();
2483 
2484   gclog_or_tty->print_cr("MISC");
2485   print_summary_sd("   Stop World", &_all_stop_world_times_ms);
2486   print_summary_sd("   Yields", &_all_yield_times_ms);
2487 }
2488 
2489 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2490   if (TraceOldGenTime) {
2491     _all_full_gc_times.add(full_gc_time_ms);
2492   }
2493 }
2494 
2495 void TraceOldGenTimeData::print() const {
2496   if (!TraceOldGenTime) {
2497     return;
2498   }
2499 
2500   if (_all_full_gc_times.num() > 0) {
2501     gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
2502       _all_full_gc_times.num(),
2503       _all_full_gc_times.sum() / 1000.0);
2504     gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2505     gclog_or_tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
2506       _all_full_gc_times.sd(),
2507       _all_full_gc_times.maximum());
2508   }
2509 }


  12  * version 2 for more details (a copy is included in the LICENSE file that
  13  * accompanied this code).
  14  *
  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/concurrentG1Refine.hpp"
  27 #include "gc/g1/concurrentMark.hpp"
  28 #include "gc/g1/concurrentMarkThread.inline.hpp"
  29 #include "gc/g1/g1CollectedHeap.inline.hpp"
  30 #include "gc/g1/g1CollectorPolicy.hpp"
  31 #include "gc/g1/g1IHOPControl.hpp"

  32 #include "gc/g1/g1GCPhaseTimes.hpp"

  33 #include "gc/g1/heapRegion.inline.hpp"
  34 #include "gc/g1/heapRegionRemSet.hpp"
  35 #include "gc/shared/gcPolicyCounters.hpp"
  36 #include "runtime/arguments.hpp"
  37 #include "runtime/java.hpp"
  38 #include "runtime/mutexLocker.hpp"
  39 #include "utilities/debug.hpp"
  40 #include "utilities/pair.hpp"
  41 
  42 // Different defaults for different numbers of GC threads
  43 // They were chosen by running GCOld and SPECjbb on debris with different
  44 //   numbers of GC threads and choosing them based on the results
  45 
  46 // all the same
  47 static double rs_length_diff_defaults[] = {
  48   0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
  49 };
  50 
  51 static double cost_per_card_ms_defaults[] = {
  52   0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015


 102   _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
 103   _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 104   _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
 105   _non_young_other_cost_per_region_ms_seq(
 106                                          new TruncatedSeq(TruncatedSeqLength)),
 107 
 108   _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
 109   _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
 110 
 111   _pause_time_target_ms((double) MaxGCPauseMillis),
 112 
 113   _recent_prev_end_times_for_all_gcs_sec(
 114                                 new TruncatedSeq(NumPrevPausesForHeuristics)),
 115 
 116   _recent_avg_pause_time_ratio(0.0),
 117   _rs_lengths_prediction(0),
 118   _max_survivor_regions(0),
 119 
 120   _eden_used_bytes_before_gc(0),
 121   _survivor_used_bytes_before_gc(0),
 122   _old_used_bytes_before_gc(0),
 123   _humongous_used_bytes_before_gc(0),
 124   _heap_used_bytes_before_gc(0),
 125   _metaspace_used_bytes_before_gc(0),
 126   _eden_capacity_bytes_before_gc(0),
 127   _heap_capacity_bytes_before_gc(0),
 128 
 129   _eden_cset_region_length(0),
 130   _survivor_cset_region_length(0),
 131   _old_cset_region_length(0),
 132 
 133   _collection_set(NULL),
 134   _collection_set_bytes_used_before(0),
 135 
 136   // Incremental CSet attributes
 137   _inc_cset_build_state(Inactive),
 138   _inc_cset_head(NULL),
 139   _inc_cset_tail(NULL),
 140   _inc_cset_bytes_used_before(0),
 141   _inc_cset_max_finger(NULL),
 142   _inc_cset_recorded_rs_lengths(0),
 143   _inc_cset_recorded_rs_lengths_diffs(0),


 160   // indirectly use it through this object passed to their constructor.
 161   _short_lived_surv_rate_group =
 162     new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
 163   _survivor_surv_rate_group =
 164     new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
 165 
 166   // Set up the region size and associated fields. Given that the
 167   // policy is created before the heap, we have to set this up here,
 168   // so it's done as soon as possible.
 169 
 170   // It would have been natural to pass initial_heap_byte_size() and
 171   // max_heap_byte_size() to setup_heap_region_size() but those have
 172   // not been set up at this point since they should be aligned with
 173   // the region size. So, there is a circular dependency here. We base
 174   // the region size on the heap size, but the heap size should be
 175   // aligned with the region size. To get around this we use the
 176   // unaligned values for the heap.
 177   HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
 178   HeapRegionRemSet::setup_remset_size();
 179 
 180   _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
 181   _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
 182 
 183   _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
 184 
 185   int index = MIN2(_parallel_gc_threads - 1, 7);
 186 
 187   _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
 188   _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
 189   _cost_scan_hcc_seq->add(0.0);
 190   _young_cards_per_entry_ratio_seq->add(
 191                                   young_cards_per_entry_ratio_defaults[index]);
 192   _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
 193   _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
 194   _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
 195   _young_other_cost_per_region_ms_seq->add(
 196                                young_other_cost_per_region_ms_defaults[index]);
 197   _non_young_other_cost_per_region_ms_seq->add(
 198                            non_young_other_cost_per_region_ms_defaults[index]);
 199 


 761   HeapRegion* head = _g1->young_list()->first_region();
 762   return
 763     verify_young_ages(head, _short_lived_surv_rate_group);
 764   // also call verify_young_ages on any additional surv rate groups
 765 }
 766 
 767 bool
 768 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
 769                                      SurvRateGroup *surv_rate_group) {
  770   guarantee(surv_rate_group != NULL, "pre-condition");
 771 
 772   const char* name = surv_rate_group->name();
 773   bool ret = true;
 774   int prev_age = -1;
 775 
 776   for (HeapRegion* curr = head;
 777        curr != NULL;
 778        curr = curr->get_next_young_region()) {
 779     SurvRateGroup* group = curr->surv_rate_group();
 780     if (group == NULL && !curr->is_survivor()) {
 781       log_info(gc, verify)("## %s: encountered NULL surv_rate_group", name);
 782       ret = false;
 783     }
 784 
 785     if (surv_rate_group == group) {
 786       int age = curr->age_in_surv_rate_group();
 787 
 788       if (age < 0) {
 789         log_info(gc, verify)("## %s: encountered negative age", name);
 790         ret = false;
 791       }
 792 
 793       if (age <= prev_age) {
 794         log_info(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);

 795         ret = false;
 796       }
 797       prev_age = age;
 798     }
 799   }
 800 
 801   return ret;
 802 }
 803 #endif // PRODUCT
 804 
 805 void G1CollectorPolicy::record_full_collection_start() {
 806   _full_collection_start_sec = os::elapsedTime();
 807   record_heap_size_info_at_start(true /* full */);
 808   // Release the future to-space so that it is available for compaction into.
 809   collector_state()->set_full_collection(true);
 810 }
 811 
 812 void G1CollectorPolicy::record_full_collection_end() {
 813   // Consider this like a collection pause for the purposes of allocation
 814   // since last pause.


 953 
 954 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
 955   return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
 956 }
 957 
 958 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
 959   return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
 960 }
 961 
 962 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
 963   if (about_to_start_mixed_phase()) {
 964     return false;
 965   }
 966 
 967   size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();
 968 
 969   size_t cur_used_bytes = _g1->non_young_capacity_bytes();
 970   size_t alloc_byte_size = alloc_word_size * HeapWordSize;
 971   size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;
 972 
 973   bool result = false;
 974   if (marking_request_bytes > marking_initiating_used_threshold) {
 975     result = collector_state()->gcs_are_young() && !collector_state()->last_young_gc();
 976     log_debug(gc, ihop, ergo)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
 977                               result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
 978                               cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1->capacity() * 100, source);

 979   }
 980 
 981   return result;
 982 }
 983 
 984 // Anything below this threshold is considered to be zero
 985 #define MIN_TIMER_GRANULARITY 0.0000001
 986 
 987 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
 988   double end_time_sec = os::elapsedTime();
 989 
 990   size_t cur_used_bytes = _g1->used();
 991   assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
 992   bool last_pause_included_initial_mark = false;
 993   bool update_stats = !_g1->evacuation_failed();
 994 
 995   NOT_PRODUCT(_short_lived_surv_rate_group->print());

 996 
 997   record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
 998 
 999   last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
1000   if (last_pause_included_initial_mark) {
1001     record_concurrent_mark_init_end(0.0);
1002   } else {
1003     maybe_start_marking();
1004   }
1005 
1006   double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
1007   if (app_time_ms < MIN_TIMER_GRANULARITY) {
1008     // This usually happens due to the timer not having the required
1009     // granularity. Some Linuxes are the usual culprits.
1010     // We'll just set it to something (arbitrarily) small.
1011     app_time_ms = 1.0;
1012   }
1013 
1014   if (update_stats) {
1015     _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());


1162   // IHOP control wants to know the expected young gen length if it were not
1163   // restrained by the heap reserve. Using the actual length would make the
1164   // prediction too small and thus limit the young gen every time we get to the
1165   // predicted target occupancy.
1166   size_t last_unrestrained_young_length = update_young_list_max_and_target_length();
1167   update_rs_lengths_prediction();
1168 
1169   update_ihop_prediction(app_time_ms / 1000.0,
1170                          _bytes_allocated_in_old_since_last_gc,
1171                          last_unrestrained_young_length * HeapRegion::GrainBytes);
1172   _bytes_allocated_in_old_since_last_gc = 0;
1173 
1174   _ihop_control->send_trace_event(_g1->gc_tracer_stw());
1175 
1176   // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1177   double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1178 
1179   double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1180 
1181   if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1182     log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal). "
1183                                 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
1184                                 update_rs_time_goal_ms, scan_hcc_time_ms);

1185 
1186     update_rs_time_goal_ms = 0;
1187   } else {
1188     update_rs_time_goal_ms -= scan_hcc_time_ms;
1189   }
1190   adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1191                                phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1192                                update_rs_time_goal_ms);
1193 
1194   cset_chooser()->verify();
1195 }
1196 
1197 G1IHOPControl* G1CollectorPolicy::create_ihop_control() const {
1198   if (G1UseAdaptiveIHOP) {
1199     return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
1200                                      G1CollectedHeap::heap()->max_capacity(),
1201                                      &_predictor,
1202                                      G1ReservePercent,
1203                                      G1HeapWastePercent);
1204   } else {


1242 
1243   if (report) {
1244     report_ihop_statistics();
1245   }
1246 }
1247 
1248 void G1CollectorPolicy::report_ihop_statistics() {
1249   _ihop_control->print();
1250 }
1251 
1252 #define EXT_SIZE_FORMAT "%.1f%s"
1253 #define EXT_SIZE_PARAMS(bytes)                                  \
1254   byte_size_in_proper_unit((double)(bytes)),                    \
1255   proper_unit_for_byte_size((bytes))
1256 
1257 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1258   YoungList* young_list = _g1->young_list();
1259   _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1260   _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1261   _heap_capacity_bytes_before_gc = _g1->capacity();
1262   _old_used_bytes_before_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
1263   _humongous_used_bytes_before_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
1264   _heap_used_bytes_before_gc = _g1->used();
1265   _eden_capacity_bytes_before_gc = (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;




1266   _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1267 }
1268 
1269 void G1CollectorPolicy::print_detailed_heap_transition() const {
1270   YoungList* young_list = _g1->young_list();
1271 
1272   size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
1273   size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
1274   size_t heap_used_bytes_after_gc = _g1->used();
1275   size_t old_used_bytes_after_gc = _g1->old_regions_count() * HeapRegion::GrainBytes;
1276   size_t humongous_used_bytes_after_gc = _g1->humongous_regions_count() * HeapRegion::GrainBytes;
1277 
1278   size_t heap_capacity_bytes_after_gc = _g1->capacity();
1279   size_t eden_capacity_bytes_after_gc =
1280     (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
1281   size_t survivor_capacity_bytes_after_gc = _max_survivor_regions * HeapRegion::GrainBytes;
1282 
1283   log_info(gc, heap)("Eden: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1284                      _eden_used_bytes_before_gc / K, eden_used_bytes_after_gc /K, eden_capacity_bytes_after_gc /K);
1285   log_info(gc, heap)("Survivor: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1286                      _survivor_used_bytes_before_gc / K, survivor_used_bytes_after_gc /K, survivor_capacity_bytes_after_gc /K);
1287   log_info(gc, heap)("Survivor: " SIZE_FORMAT "K->" SIZE_FORMAT "K("  SIZE_FORMAT "K)",
1288                      _survivor_used_bytes_before_gc / K, survivor_used_bytes_after_gc /K, survivor_capacity_bytes_after_gc /K);
1289   log_info(gc, heap)("Humongous: " SIZE_FORMAT "K->" SIZE_FORMAT "K",
1290                      _humongous_used_bytes_before_gc / K, humongous_used_bytes_after_gc /K);







1291 

1292   MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);



1293 }
1294 
1295 void G1CollectorPolicy::print_phases(double pause_time_sec) {
1296   phase_times()->print(pause_time_sec);
1297 }
1298 
1299 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1300                                                      double update_rs_processed_buffers,
1301                                                      double goal_ms) {
1302   DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1303   ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1304 
1305   if (G1UseAdaptiveConcRefinement) {
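         // inc_k/dec_k grow or shrink the green zone by 10% per adjustment;
         // k_gy and k_gr are assumed to derive the yellow and red zones as
         // multiples of the green zone.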
1306     const int k_gy = 3, k_gr = 6;
1307     const double inc_k = 1.1, dec_k = 0.9;
1308 
1309     int g = cg1r->green_zone();
1310     if (update_rs_time > goal_ms) {
1311       g = (int)(g * dec_k);  // Can become 0; that's OK, as it would mean mutator-only processing.
1312     } else {


1527 
1528 size_t G1CollectorPolicy::expansion_amount() const {
1529   double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1530   double threshold = _gc_overhead_perc;
1531   if (recent_gc_overhead > threshold) {
1532     // We will double the existing space, or take
1533     // G1ExpandByPercentOfAvailable % of the available expansion
1534     // space, whichever is smaller, bounded below by a minimum
1535     // expansion (unless that's all that's left.)
1536     const size_t min_expand_bytes = 1*M;
1537     size_t reserved_bytes = _g1->max_capacity();
1538     size_t committed_bytes = _g1->capacity();
1539     size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1540     size_t expand_bytes;
1541     size_t expand_bytes_via_pct =
1542       uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1543     expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1544     expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1545     expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
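         // Worked example, assuming the default G1ExpandByPercentOfAvailable of
         // 20: with 1024M committed out of 4096M reserved, uncommitted is 3072M,
         // so expand_bytes_via_pct is ~614M; the clamps leave that unchanged
         // since 1M <= 614M <= MIN2(1024M, 3072M).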
1546 
1547     log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
1548                               "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B calculated expansion amount: " SIZE_FORMAT "B (" INTX_FORMAT "%%)",
1549                               recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes_via_pct, G1ExpandByPercentOfAvailable);








1550 
1551     return expand_bytes;
1552   } else {
1553     return 0;
1554   }
1555 }
1556 
1557 void G1CollectorPolicy::print_tracing_info() const {
1558   _trace_young_gen_time_data.print();
1559   _trace_old_gen_time_data.print();
1560 }
1561 
1562 void G1CollectorPolicy::print_yg_surv_rate_info() const {
1563 #ifndef PRODUCT
1564   _short_lived_surv_rate_group->print_surv_rate_summary();
1565   // add this call for any other surv rate groups
1566 #endif // PRODUCT
1567 }
1568 
1569 bool G1CollectorPolicy::is_young_list_full() const {


1594 }
1595 
1596 // Calculates survivor space parameters.
1597 void G1CollectorPolicy::update_survivors_policy() {
1598   double max_survivor_regions_d =
1599                  (double) _young_list_target_length / (double) SurvivorRatio;
1600   // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1601   // smaller than 1.0) we'll get 1.
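       // For example, assuming the default SurvivorRatio of 8, a young target
       // of 60 regions gives ceil(60 / 8) = ceil(7.5) = 8 survivor regions.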
1602   _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1603 
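       // The age table then picks a tenuring threshold such that the predicted
       // survivors fit in the survivor space sized above.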
1604   _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1605         HeapRegion::GrainWords * _max_survivor_regions, counters());
1606 }
1607 
1608 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
1609   // We actually check whether we are marking here and not if we are in a
1610   // reclamation phase. This means that we will schedule a concurrent mark
1611   // even while we are still in the process of reclaiming memory.
1612   bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1613   if (!during_cycle) {
1614     log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));




1615     collector_state()->set_initiate_conc_mark_if_possible(true);
1616     return true;
1617   } else {
1618     log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));




1619     return false;
1620   }
1621 }
1622 
1623 void G1CollectorPolicy::decide_on_conc_mark_initiation() {
1624   // We are about to decide on whether this pause will be an
1625   // initial-mark pause.
1626 
1627   // First, collector_state()->during_initial_mark_pause() should not already be set. We
1628   // will set it here if we have to. However, it should be cleared by
1629   // the end of the pause (it's only set for the duration of an
1630   // initial-mark pause).
1631   assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1632 
1633   if (collector_state()->initiate_conc_mark_if_possible()) {
1634     // We noticed on a previous pause that the heap occupancy had gone
1635     // over the initiating threshold, indicating that we should start a
1636     // concurrent marking cycle. So we might initiate one now.
1637 
1638     if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
1639       // Initiate a new initial mark only if there is no marking or reclamation going
1640       // on.
1641 
1642       collector_state()->set_during_initial_mark_pause(true);
1643       // And we can now clear initiate_conc_mark_if_possible() as
1644       // we've already acted on it.
1645       collector_state()->set_initiate_conc_mark_if_possible(false);
1646 
1647       log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");


1648     } else {
1649       // The concurrent marking thread is still finishing up the
1650       // previous cycle. If we started one right now the two cycles
1651       // would overlap. In particular, the concurrent marking thread might
1652       // be in the process of clearing the next marking bitmap (which
1653       // we will use for the next cycle if we start one). Starting a
1654       // cycle now would be bad given that parts of the marking
1655       // information might get cleared by the marking thread. And we
1656       // cannot wait for the marking thread to finish the cycle as it
1657       // periodically yields while clearing the next marking bitmap
1658       // and, if it's in a yield point, it's waiting for us to
1659       // finish. So, at this point we will not start a cycle and we'll
1660       // let the concurrent marking thread complete the last one.
1661       log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");


1662     }
1663   }
1664 }
1665 
1666 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1667   G1CollectedHeap* _g1h;
1668   CSetChooserParUpdater _cset_updater;
1669 
1670 public:
1671   ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1672                            uint chunk_size) :
1673     _g1h(G1CollectedHeap::heap()),
1674     _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1675 
1676   bool doHeapRegion(HeapRegion* r) {
1677     // Do we have any marking information for this region?
1678     if (r->is_marked()) {
1679       // We will skip any region that's currently used as an old GC
1680       // alloc region (we should not consider those for collection
1681       // before we fill them up).


1987       _initial_mark_to_mixed.add_pause(end - start);
1988       break;
1989     case InitialMarkGC:
1990       _initial_mark_to_mixed.record_initial_mark_end(end);
1991       break;
1992     case MixedGC:
1993       _initial_mark_to_mixed.record_mixed_gc_start(start);
1994       break;
1995     default:
1996       ShouldNotReachHere();
1997   }
1998 }
1999 
2000 void G1CollectorPolicy::abort_time_to_mixed_tracking() {
2001   _initial_mark_to_mixed.reset();
2002 }
2003 
2004 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
2005                                                 const char* false_action_str) const {
2006   if (cset_chooser()->is_empty()) {
2007     log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);


2008     return false;
2009   }
2010 
2011   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
2012   size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
2013   double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2014   double threshold = (double) G1HeapWastePercent;
2015   if (reclaimable_perc <= threshold) {
2016     log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
2017                         false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);







2018     return false;
2019   }
2020   log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
2021                       true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);








2022   return true;
2023 }
2024 
2025 uint G1CollectorPolicy::calc_min_old_cset_length() const {
2026   // The min old CSet region bound is based on the maximum desired
2027   // number of mixed GCs after a cycle. I.e., even if some old regions
2028   // look expensive, we should add them to the CSet anyway to make
2029   // sure we go through the available old regions in no more than the
2030   // maximum desired number of mixed GCs.
2031   //
2032   // The calculation is based on the number of marked regions we added
2033   // to the CSet chooser in the first place, not how many remain, so
2034   // that the result is the same during all mixed GCs that follow a cycle.
2035 
2036   const size_t region_num = (size_t) cset_chooser()->length();
2037   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
2038   size_t result = region_num / gc_num;
2039   // emulate ceiling
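       // For example, 25 candidate regions with the default G1MixedGCCountTarget
       // of 8 gives 25 / 8 = 3; since 3 * 8 < 25, the result is bumped to 4.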
2040   if (result * gc_num < region_num) {
2041     result += 1;


2057   if (100 * result < region_num * perc) {
2058     result += 1;
2059   }
2060   return (uint) result;
2061 }
2062 
2063 
2064 double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
2065   double young_start_time_sec = os::elapsedTime();
2066 
2067   YoungList* young_list = _g1->young_list();
2068   finalize_incremental_cset_building();
2069 
2070   guarantee(target_pause_time_ms > 0.0,
2071             "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
2072   guarantee(_collection_set == NULL, "Precondition");
2073 
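       // The base time covers the per-pause costs that do not depend on the
       // regions chosen (e.g. processing the pending cards); what remains of
       // the target pause time is the budget for adding regions.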
2074   double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
2075   double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);
2076 
2077   log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",





2078                             _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);
2079 
2080   collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());
2081 
2082   if (collector_state()->last_gc_was_young()) {
2083     _trace_young_gen_time_data.increment_young_collection_count();
2084   } else {
2085     _trace_young_gen_time_data.increment_mixed_collection_count();
2086   }
2087 
2088   // The young list is laid out so that the survivor regions from the
2089   // previous pause are appended to the RHS of the young list, i.e.
2090   //   [Newly Young Regions ++ Survivors from last pause].
2091 
2092   uint survivor_region_length = young_list->survivor_length();
2093   uint eden_region_length = young_list->eden_length();
2094   init_cset_region_lengths(eden_region_length, survivor_region_length);
2095 
2096   HeapRegion* hr = young_list->first_survivor_region();
2097   while (hr != NULL) {
2098     assert(hr->is_survivor(), "badly formed young list");
2099     // There is a convention that all the young regions in the CSet
2100     // are tagged as "eden", so we do this for the survivors here. We
2101     // use the special set_eden_pre_gc() as it doesn't check that the
2102     // region is free (which is not the case here).
2103     hr->set_eden_pre_gc();
2104     hr = hr->get_next_young_region();
2105   }
2106 
2107   // Clear the fields that point to the survivor list - they are all young now.
2108   young_list->clear_survivors();
2109 
2110   _collection_set = _inc_cset_head;
2111   _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
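       // Deduct the predicted time for the young regions from the pause budget,
       // clamping at zero; the remainder is returned and later handed to
       // finalize_old_cset_part() for old region selection.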
2112   time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);
2113 
2114   log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
2115                             eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms);







2116 
2117   // The number of recorded young regions is the incremental
2118   // collection set's current size
2119   set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);
2120 
2121   double young_end_time_sec = os::elapsedTime();
2122   phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);
2123 
2124   return time_remaining_ms;
2125 }
2126 
2127 void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
2128   double non_young_start_time_sec = os::elapsedTime();
2129   double predicted_old_time_ms = 0.0;
2130 
2131 
2132   if (!collector_state()->gcs_are_young()) {
2133     cset_chooser()->verify();
2134     const uint min_old_cset_length = calc_min_old_cset_length();
2135     const uint max_old_cset_length = calc_max_old_cset_length();
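         // min ensures the candidates are consumed within G1MixedGCCountTarget
         // mixed GCs (see calc_min_old_cset_length() above); max is assumed to
         // cap how many old regions a single pause may take.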
2136 
2137     uint expensive_region_num = 0;
2138     bool check_time_remaining = adaptive_young_list_length();
2139 
2140     HeapRegion* hr = cset_chooser()->peek();
2141     while (hr != NULL) {
2142       if (old_cset_region_length() >= max_old_cset_length) {
2143         // Added maximum number of old regions to the CSet.
2144         log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",




2145                                   old_cset_region_length(), max_old_cset_length);
2146         break;
2147       }
2148 
2149 
2150       // Stop adding regions if the remaining reclaimable space is
2151       // not above G1HeapWastePercent.
2152       size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
2153       double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2154       double threshold = (double) G1HeapWastePercent;
2155       if (reclaimable_perc <= threshold) {
2156         // We've added enough old regions that the amount of uncollected
2157         // reclaimable space is at or below the waste threshold. Stop
2158         // adding old regions to the CSet.
2159         log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
2160                                   "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
2161                                   old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);








2162         break;
2163       }
2164 
2165       double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
2166       if (check_time_remaining) {
2167         if (predicted_time_ms > time_remaining_ms) {
2168           // Too expensive for the current CSet.
2169 
2170           if (old_cset_region_length() >= min_old_cset_length) {
2171             // We have added the minimum number of old regions to the CSet,
2172             // we are done with this CSet.
2173             log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
2174                                       "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
2175                                       predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length);






2176             break;
2177           }
2178 
2179           // We'll add it anyway given that we haven't reached the
2180           // minimum number of old regions.
2181           expensive_region_num += 1;
2182         }
2183       } else {
2184         if (old_cset_region_length() >= min_old_cset_length) {
2185           // In the non-auto-tuning case, we'll finish adding regions
2186           // to the CSet if we reach the minimum.
2187 
2188           log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",



2189                                     old_cset_region_length(), min_old_cset_length);
2190           break;
2191         }
2192       }
2193 
2194       // We will add this region to the CSet.
2195       time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
2196       predicted_old_time_ms += predicted_time_ms;
2197       cset_chooser()->pop(); // already have region via peek()
2198       _g1->old_set_remove(hr);
2199       add_old_region_to_cset(hr);
2200 
2201       hr = cset_chooser()->peek();
2202     }
2203     if (hr == NULL) {
2204       log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");


2205     }
2206 
2207     if (expensive_region_num > 0) {
2208       // We print the information once here at the end, predicated on
2209       // whether we added any apparently expensive regions or not, to
2210       // avoid generating output per region.
2211       log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
2212                                 "old %u regions, expensive: %u regions, min %u regions, remaining time: %1.2fms",
2213                                 old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);








2214     }
2215 
2216     cset_chooser()->verify();
2217   }
2218 
2219   stop_incremental_cset_building();
2220 
2221   log_debug(gc, ergo, cset)("Finish choosing CSet. old %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
2222                             old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);





2223 
2224   double non_young_end_time_sec = os::elapsedTime();
2225   phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2226 }
2227 
2228 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
2229   if (TraceYoungGenTime) {
2230     _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2231   }
2232 }
2233 
2234 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
2235   if (TraceYoungGenTime) {
2236     _all_yield_times_ms.add(yield_time_ms);
2237   }
2238 }
2239 
2240 void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
2241   if (TraceYoungGenTime) {
2242     _total.add(pause_time_ms);


2261     _parallel_other.add(parallel_other_time);
2262     _clear_ct.add(phase_times->cur_clear_ct_time_ms());
2263   }
2264 }
2265 
2266 void TraceYoungGenTimeData::increment_young_collection_count() {
2267   if (TraceYoungGenTime) {
2268     ++_young_pause_num;
2269   }
2270 }
2271 
2272 void TraceYoungGenTimeData::increment_mixed_collection_count() {
2273   if (TraceYoungGenTime) {
2274     ++_mixed_pause_num;
2275   }
2276 }
2277 
2278 void TraceYoungGenTimeData::print_summary(const char* str,
2279                                           const NumberSeq* seq) const {
2280   double sum = seq->sum();
2281   tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2282                 str, sum / 1000.0, seq->avg());
2283 }
2284 
2285 void TraceYoungGenTimeData::print_summary_sd(const char* str,
2286                                              const NumberSeq* seq) const {
2287   print_summary(str, seq);
2288   tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2289                 "(num", seq->num(), seq->sd(), seq->maximum());
2290 }
2291 
2292 void TraceYoungGenTimeData::print() const {
2293   if (!TraceYoungGenTime) {
2294     return;
2295   }
2296 
2297   tty->print_cr("ALL PAUSES");
2298   print_summary_sd("   Total", &_total);
2299   tty->cr();
2300   tty->cr();
2301   tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
2302   tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
2303   tty->cr();
2304 
2305   tty->print_cr("EVACUATION PAUSES");
2306 
2307   if (_young_pause_num == 0 && _mixed_pause_num == 0) {
2308     tty->print_cr("none");
2309   } else {
2310     print_summary_sd("   Evacuation Pauses", &_total);
2311     print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
2312     print_summary("      Parallel Time", &_parallel);
2313     print_summary("         Ext Root Scanning", &_ext_root_scan);
2314     print_summary("         SATB Filtering", &_satb_filtering);
2315     print_summary("         Update RS", &_update_rs);
2316     print_summary("         Scan RS", &_scan_rs);
2317     print_summary("         Object Copy", &_obj_copy);
2318     print_summary("         Termination", &_termination);
2319     print_summary("         Parallel Other", &_parallel_other);
2320     print_summary("      Clear CT", &_clear_ct);
2321     print_summary("      Other", &_other);
2322   }
2323   tty->cr();
2324 
2325   tty->print_cr("MISC");
2326   print_summary_sd("   Stop World", &_all_stop_world_times_ms);
2327   print_summary_sd("   Yields", &_all_yield_times_ms);
2328 }
2329 
2330 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2331   if (TraceOldGenTime) {
2332     _all_full_gc_times.add(full_gc_time_ms);
2333   }
2334 }
2335 
2336 void TraceOldGenTimeData::print() const {
2337   if (!TraceOldGenTime) {
2338     return;
2339   }
2340 
2341   if (_all_full_gc_times.num() > 0) {
2342     tty->print("\n%4d full_gcs: total time = %8.2f s",
2343       _all_full_gc_times.num(),
2344       _all_full_gc_times.sum() / 1000.0);
2345     tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
2346     tty->print_cr("                     [std. dev = %8.2f ms, max = %8.2f ms]",
2347       _all_full_gc_times.sd(),
2348       _all_full_gc_times.maximum());
2349   }
2350 }