11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/concurrentG1Refine.hpp"
27 #include "gc/g1/concurrentMark.hpp"
28 #include "gc/g1/concurrentMarkThread.inline.hpp"
29 #include "gc/g1/g1CollectedHeap.inline.hpp"
30 #include "gc/g1/g1CollectorPolicy.hpp"
31 #include "gc/g1/g1ErgoVerbose.hpp"
32 #include "gc/g1/g1GCPhaseTimes.hpp"
33 #include "gc/g1/g1Log.hpp"
34 #include "gc/g1/heapRegion.inline.hpp"
35 #include "gc/g1/heapRegionRemSet.hpp"
36 #include "gc/shared/gcPolicyCounters.hpp"
37 #include "runtime/arguments.hpp"
38 #include "runtime/java.hpp"
39 #include "runtime/mutexLocker.hpp"
40 #include "utilities/debug.hpp"
41
42 // Different defaults for different number of GC threads
43 // They were chosen by running GCOld and SPECjbb on debris with different
44 // numbers of GC threads and choosing them based on the results
45
46 // all the same
// Default predicted difference between actual and recorded remembered-set
// lengths, indexed by (capped) parallel GC thread count; all zero.
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};
50
// Default cost (ms) of processing one card, indexed by (capped) parallel
// GC thread count — more threads means lower per-card cost.
static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};
54
55 // all the same
56 static double young_cards_per_entry_ratio_defaults[] = {
102 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
103 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
104 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
105 _non_young_other_cost_per_region_ms_seq(
106 new TruncatedSeq(TruncatedSeqLength)),
107
108 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
109 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
110
111 _pause_time_target_ms((double) MaxGCPauseMillis),
112
113 _recent_prev_end_times_for_all_gcs_sec(
114 new TruncatedSeq(NumPrevPausesForHeuristics)),
115
116 _recent_avg_pause_time_ratio(0.0),
117 _rs_lengths_prediction(0),
118 _max_survivor_regions(0),
119
120 _eden_used_bytes_before_gc(0),
121 _survivor_used_bytes_before_gc(0),
122 _heap_used_bytes_before_gc(0),
123 _metaspace_used_bytes_before_gc(0),
124 _eden_capacity_bytes_before_gc(0),
125 _heap_capacity_bytes_before_gc(0),
126
127 _eden_cset_region_length(0),
128 _survivor_cset_region_length(0),
129 _old_cset_region_length(0),
130
131 _collection_set(NULL),
132 _collection_set_bytes_used_before(0),
133
134 // Incremental CSet attributes
135 _inc_cset_build_state(Inactive),
136 _inc_cset_head(NULL),
137 _inc_cset_tail(NULL),
138 _inc_cset_bytes_used_before(0),
139 _inc_cset_max_finger(NULL),
140 _inc_cset_recorded_rs_lengths(0),
141 _inc_cset_recorded_rs_lengths_diffs(0),
154 // indirectly use it through this object passed to their constructor.
155 _short_lived_surv_rate_group =
156 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
157 _survivor_surv_rate_group =
158 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
159
160 // Set up the region size and associated fields. Given that the
161 // policy is created before the heap, we have to set this up here,
162 // so it's done as soon as possible.
163
164 // It would have been natural to pass initial_heap_byte_size() and
165 // max_heap_byte_size() to setup_heap_region_size() but those have
166 // not been set up at this point since they should be aligned with
167 // the region size. So, there is a circular dependency here. We base
168 // the region size on the heap size, but the heap size should be
169 // aligned with the region size. To get around this we use the
170 // unaligned values for the heap.
171 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
172 HeapRegionRemSet::setup_remset_size();
173
174 G1ErgoVerbose::initialize();
175 if (PrintAdaptiveSizePolicy) {
176 // Currently, we only use a single switch for all the heuristics.
177 G1ErgoVerbose::set_enabled(true);
178 // Given that we don't currently have a verboseness level
179 // parameter, we'll hardcode this to high. This can be easily
180 // changed in the future.
181 G1ErgoVerbose::set_level(ErgoHigh);
182 } else {
183 G1ErgoVerbose::set_enabled(false);
184 }
185
186 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
187 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
188
189 _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
190
191 int index = MIN2(_parallel_gc_threads - 1, 7);
192
193 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
194 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
195 _cost_scan_hcc_seq->add(0.0);
196 _young_cards_per_entry_ratio_seq->add(
197 young_cards_per_entry_ratio_defaults[index]);
198 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
199 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
200 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
201 _young_other_cost_per_region_ms_seq->add(
202 young_other_cost_per_region_ms_defaults[index]);
203 _non_young_other_cost_per_region_ms_seq->add(
204 non_young_other_cost_per_region_ms_defaults[index]);
205
756 HeapRegion* head = _g1->young_list()->first_region();
757 return
758 verify_young_ages(head, _short_lived_surv_rate_group);
759 // also call verify_young_ages on any additional surv rate groups
760 }
761
762 bool
763 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
764 SurvRateGroup *surv_rate_group) {
765 guarantee( surv_rate_group != NULL, "pre-condition" );
766
767 const char* name = surv_rate_group->name();
768 bool ret = true;
769 int prev_age = -1;
770
771 for (HeapRegion* curr = head;
772 curr != NULL;
773 curr = curr->get_next_young_region()) {
774 SurvRateGroup* group = curr->surv_rate_group();
775 if (group == NULL && !curr->is_survivor()) {
776 gclog_or_tty->print_cr("## %s: encountered NULL surv_rate_group", name);
777 ret = false;
778 }
779
780 if (surv_rate_group == group) {
781 int age = curr->age_in_surv_rate_group();
782
783 if (age < 0) {
784 gclog_or_tty->print_cr("## %s: encountered negative age", name);
785 ret = false;
786 }
787
788 if (age <= prev_age) {
789 gclog_or_tty->print_cr("## %s: region ages are not strictly increasing "
790 "(%d, %d)", name, age, prev_age);
791 ret = false;
792 }
793 prev_age = age;
794 }
795 }
796
797 return ret;
798 }
799 #endif // PRODUCT
800
// Records the start of a full (stop-the-world) collection: timestamps the
// start and snapshots heap/metaspace sizes for the transition printout.
void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start(true /* full */);
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}
807
808 void G1CollectorPolicy::record_full_collection_end() {
809 // Consider this like a collection pause for the purposes of allocation
810 // since last pause.
942 double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
943 return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
944 }
945
946 bool G1CollectorPolicy::about_to_start_mixed_phase() const {
947 return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
948 }
949
// Decides whether the next pause should initiate a concurrent marking
// cycle: returns true when the projected occupancy (current non-young
// bytes plus the pending allocation) exceeds the
// InitiatingHeapOccupancyPercent threshold and no marking / mixed-phase
// activity is already underway. "source" is used only for ergo logging.
bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    // A cycle is in progress or mixed GCs are still pending — do not
    // request another one.
    return false;
  }

  // Threshold in bytes: IHOP percent of the currently committed capacity.
  size_t marking_initiating_used_threshold =
    (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
  size_t cur_used_bytes = _g1->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;

  if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
    if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
      ergo_verbose5(ErgoConcCycles,
                    "request concurrent cycle initiation",
                    ergo_format_reason("occupancy higher than threshold")
                    ergo_format_byte("occupancy")
                    ergo_format_byte("allocation request")
                    ergo_format_byte_perc("threshold")
                    ergo_format_str("source"),
                    cur_used_bytes,
                    alloc_byte_size,
                    marking_initiating_used_threshold,
                    (double) InitiatingHeapOccupancyPercent,
                    source);
      return true;
    } else {
      // Over threshold, but mixed collections are still expected — log and
      // fall through to return false.
      ergo_verbose5(ErgoConcCycles,
                    "do not request concurrent cycle initiation",
                    ergo_format_reason("still doing mixed collections")
                    ergo_format_byte("occupancy")
                    ergo_format_byte("allocation request")
                    ergo_format_byte_perc("threshold")
                    ergo_format_str("source"),
                    cur_used_bytes,
                    alloc_byte_size,
                    marking_initiating_used_threshold,
                    (double) InitiatingHeapOccupancyPercent,
                    source);
    }
  }

  return false;
}
993
994 // Anything below that is considered to be zero
995 #define MIN_TIMER_GRANULARITY 0.0000001
996
997 void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
998 double end_time_sec = os::elapsedTime();
999 assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
1000 "otherwise, the subtraction below does not make sense");
1001 size_t rs_size =
1002 _cur_collection_pause_used_regions_at_start - cset_region_length();
1003 size_t cur_used_bytes = _g1->used();
1004 assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
1005 bool last_pause_included_initial_mark = false;
1006 bool update_stats = !_g1->evacuation_failed();
1007
1008 #ifndef PRODUCT
1009 if (G1YoungSurvRateVerbose) {
1010 gclog_or_tty->cr();
1011 _short_lived_surv_rate_group->print();
1012 // do that for any other surv rate groups too
1013 }
1014 #endif // PRODUCT
1015
1016 last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
1017 if (last_pause_included_initial_mark) {
1018 record_concurrent_mark_init_end(0.0);
1019 } else {
1020 maybe_start_marking();
1021 }
1022
1023 _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);
1024
1025 if (update_stats) {
1026 _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
1027 // this is where we update the allocation rate of the application
1028 double app_time_ms =
1029 (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
1030 if (app_time_ms < MIN_TIMER_GRANULARITY) {
1031 // This usually happens due to the timer not having the required
1032 // granularity. Some Linuxes are the usual culprits.
1033 // We'll just set it to something (arbitrarily) small.
1034 app_time_ms = 1.0;
1169 }
1170
1171 _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));
1172
1173 _pending_cards_seq->add((double) _pending_cards);
1174 _rs_lengths_seq->add((double) _max_rs_lengths);
1175 }
1176
1177 collector_state()->set_in_marking_window(new_in_marking_window);
1178 collector_state()->set_in_marking_window_im(new_in_marking_window_im);
1179 _free_regions_at_end_of_collection = _g1->num_free_regions();
1180 update_young_list_max_and_target_length();
1181 update_rs_lengths_prediction();
1182
1183 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
1184 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
1185
1186 double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);
1187
1188 if (update_rs_time_goal_ms < scan_hcc_time_ms) {
1189 ergo_verbose2(ErgoTiming,
1190 "adjust concurrent refinement thresholds",
1191 ergo_format_reason("Scanning the HCC expected to take longer than Update RS time goal")
1192 ergo_format_ms("Update RS time goal")
1193 ergo_format_ms("Scan HCC time"),
1194 update_rs_time_goal_ms,
1195 scan_hcc_time_ms);
1196
1197 update_rs_time_goal_ms = 0;
1198 } else {
1199 update_rs_time_goal_ms -= scan_hcc_time_ms;
1200 }
1201 adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
1202 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
1203 update_rs_time_goal_ms);
1204
1205 _collectionSetChooser->verify();
1206 }
1207
1208 #define EXT_SIZE_FORMAT "%.1f%s"
1209 #define EXT_SIZE_PARAMS(bytes) \
1210 byte_size_in_proper_unit((double)(bytes)), \
1211 proper_unit_for_byte_size((bytes))
1212
1213 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1214 YoungList* young_list = _g1->young_list();
1215 _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1216 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1217 _heap_capacity_bytes_before_gc = _g1->capacity();
1218 _heap_used_bytes_before_gc = _g1->used();
1219 _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
1220
1221 _eden_capacity_bytes_before_gc =
1222 (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1223
1224 if (full) {
1225 _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1226 }
1227 }
1228
1229 void G1CollectorPolicy::print_heap_transition(size_t bytes_before) const {
1230 size_t bytes_after = _g1->used();
1231 size_t capacity = _g1->capacity();
1232
1233 gclog_or_tty->print(" " SIZE_FORMAT "%s->" SIZE_FORMAT "%s(" SIZE_FORMAT "%s)",
1234 byte_size_in_proper_unit(bytes_before),
1235 proper_unit_for_byte_size(bytes_before),
1236 byte_size_in_proper_unit(bytes_after),
1237 proper_unit_for_byte_size(bytes_after),
1238 byte_size_in_proper_unit(capacity),
1239 proper_unit_for_byte_size(capacity));
1240 }
1241
// Convenience overload: prints the transition relative to the heap usage
// snapshotted at the start of the current GC.
void G1CollectorPolicy::print_heap_transition() const {
  print_heap_transition(_heap_used_bytes_before_gc);
}
1245
// Prints the detailed "[Eden: ... Survivors: ... Heap: ...]" before/after
// transition line, plus the metaspace change for full collections. The
// "before" values come from record_heap_size_info_at_start().
void G1CollectorPolicy::print_detailed_heap_transition(bool full) const {
  YoungList* young_list = _g1->young_list();

  size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
  size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
  size_t heap_used_bytes_after_gc = _g1->used();

  size_t heap_capacity_bytes_after_gc = _g1->capacity();
  // Derived the same way as the "before" value: young target size minus
  // current survivor usage.
  size_t eden_capacity_bytes_after_gc =
    (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;

  gclog_or_tty->print(
    " [Eden: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->" EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ") "
    "Survivors: " EXT_SIZE_FORMAT "->" EXT_SIZE_FORMAT " "
    "Heap: " EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")->"
    EXT_SIZE_FORMAT "(" EXT_SIZE_FORMAT ")]",
    EXT_SIZE_PARAMS(_eden_used_bytes_before_gc),
    EXT_SIZE_PARAMS(_eden_capacity_bytes_before_gc),
    EXT_SIZE_PARAMS(eden_used_bytes_after_gc),
    EXT_SIZE_PARAMS(eden_capacity_bytes_after_gc),
    EXT_SIZE_PARAMS(_survivor_used_bytes_before_gc),
    EXT_SIZE_PARAMS(survivor_used_bytes_after_gc),
    EXT_SIZE_PARAMS(_heap_used_bytes_before_gc),
    EXT_SIZE_PARAMS(_heap_capacity_bytes_before_gc),
    EXT_SIZE_PARAMS(heap_used_bytes_after_gc),
    EXT_SIZE_PARAMS(heap_capacity_bytes_after_gc));

  if (full) {
    MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
  }

  gclog_or_tty->cr();
}
1279
// Delegates printing of the per-phase timing breakdown to the phase-times
// bookkeeping object.
void G1CollectorPolicy::print_phases(double pause_time_sec) {
  phase_times()->print(pause_time_sec);
}
1283
1284 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1285 double update_rs_processed_buffers,
1286 double goal_ms) {
1287 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1288 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1289
1290 if (G1UseAdaptiveConcRefinement) {
1291 const int k_gy = 3, k_gr = 6;
1292 const double inc_k = 1.1, dec_k = 0.9;
1293
1294 int g = cg1r->green_zone();
1295 if (update_rs_time > goal_ms) {
1296 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
1297 } else {
1512
1513 size_t G1CollectorPolicy::expansion_amount() const {
1514 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1515 double threshold = _gc_overhead_perc;
1516 if (recent_gc_overhead > threshold) {
1517 // We will double the existing space, or take
1518 // G1ExpandByPercentOfAvailable % of the available expansion
1519 // space, whichever is smaller, bounded below by a minimum
1520 // expansion (unless that's all that's left.)
1521 const size_t min_expand_bytes = 1*M;
1522 size_t reserved_bytes = _g1->max_capacity();
1523 size_t committed_bytes = _g1->capacity();
1524 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1525 size_t expand_bytes;
1526 size_t expand_bytes_via_pct =
1527 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1528 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1529 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1530 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1531
1532 ergo_verbose5(ErgoHeapSizing,
1533 "attempt heap expansion",
1534 ergo_format_reason("recent GC overhead higher than "
1535 "threshold after GC")
1536 ergo_format_perc("recent GC overhead")
1537 ergo_format_perc("threshold")
1538 ergo_format_byte("uncommitted")
1539 ergo_format_byte_perc("calculated expansion amount"),
1540 recent_gc_overhead, threshold,
1541 uncommitted_bytes,
1542 expand_bytes_via_pct, (double) G1ExpandByPercentOfAvailable);
1543
1544 return expand_bytes;
1545 } else {
1546 return 0;
1547 }
1548 }
1549
// Prints the accumulated young- and old-generation time tracing data.
void G1CollectorPolicy::print_tracing_info() const {
  _trace_young_gen_time_data.print();
  _trace_old_gen_time_data.print();
}
1554
// Prints the survival rate summary for the short-lived group.
// Debug-build only; a no-op in PRODUCT builds.
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}
1561
1562 bool G1CollectorPolicy::is_young_list_full() const {
1587 }
1588
1589 // Calculates survivor space parameters.
1590 void G1CollectorPolicy::update_survivors_policy() {
1591 double max_survivor_regions_d =
1592 (double) _young_list_target_length / (double) SurvivorRatio;
1593 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1594 // smaller than 1.0) we'll get 1.
1595 _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1596
1597 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1598 HeapRegion::GrainWords * _max_survivor_regions, counters());
1599 }
1600
1601 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
1602 // We actually check whether we are marking here and not if we are in a
1603 // reclamation phase. This means that we will schedule a concurrent mark
1604 // even while we are still in the process of reclaiming memory.
1605 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1606 if (!during_cycle) {
1607 ergo_verbose1(ErgoConcCycles,
1608 "request concurrent cycle initiation",
1609 ergo_format_reason("requested by GC cause")
1610 ergo_format_str("GC cause"),
1611 GCCause::to_string(gc_cause));
1612 collector_state()->set_initiate_conc_mark_if_possible(true);
1613 return true;
1614 } else {
1615 ergo_verbose1(ErgoConcCycles,
1616 "do not request concurrent cycle initiation",
1617 ergo_format_reason("concurrent cycle already in progress")
1618 ergo_format_str("GC cause"),
1619 GCCause::to_string(gc_cause));
1620 return false;
1621 }
1622 }
1623
1624 void G1CollectorPolicy::decide_on_conc_mark_initiation() {
1625 // We are about to decide on whether this pause will be an
1626 // initial-mark pause.
1627
1628 // First, collector_state()->during_initial_mark_pause() should not be already set. We
1629 // will set it here if we have to. However, it should be cleared by
1630 // the end of the pause (it's only set for the duration of an
1631 // initial-mark pause).
1632 assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1633
1634 if (collector_state()->initiate_conc_mark_if_possible()) {
1635 // We had noticed on a previous pause that the heap occupancy has
1636 // gone over the initiating threshold and we should start a
1637 // concurrent marking cycle. So we might initiate one.
1638
1639 if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
1640 // Initiate a new initial mark only if there is no marking or reclamation going
1641 // on.
1642
1643 collector_state()->set_during_initial_mark_pause(true);
1644 // And we can now clear initiate_conc_mark_if_possible() as
1645 // we've already acted on it.
1646 collector_state()->set_initiate_conc_mark_if_possible(false);
1647
1648 ergo_verbose0(ErgoConcCycles,
1649 "initiate concurrent cycle",
1650 ergo_format_reason("concurrent cycle initiation requested"));
1651 } else {
1652 // The concurrent marking thread is still finishing up the
1653 // previous cycle. If we start one right now the two cycles
1654 // overlap. In particular, the concurrent marking thread might
1655 // be in the process of clearing the next marking bitmap (which
1656 // we will use for the next cycle if we start one). Starting a
1657 // cycle now will be bad given that parts of the marking
1658 // information might get cleared by the marking thread. And we
1659 // cannot wait for the marking thread to finish the cycle as it
1660 // periodically yields while clearing the next marking bitmap
1661 // and, if it's in a yield point, it's waiting for us to
1662 // finish. So, at this point we will not start a cycle and we'll
1663 // let the concurrent marking thread complete the last one.
1664 ergo_verbose0(ErgoConcCycles,
1665 "do not initiate concurrent cycle",
1666 ergo_format_reason("concurrent cycle already in progress"));
1667 }
1668 }
1669 }
1670
1671 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1672 G1CollectedHeap* _g1h;
1673 CSetChooserParUpdater _cset_updater;
1674
1675 public:
1676 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1677 uint chunk_size) :
1678 _g1h(G1CollectedHeap::heap()),
1679 _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1680
1681 bool doHeapRegion(HeapRegion* r) {
1682 // Do we have any marking information for this region?
1683 if (r->is_marked()) {
1684 // We will skip any region that's currently used as an old GC
1685 // alloc region (we should not consider those for collection
1686 // before we fill them up).
1940 // Returns the given amount of reclaimable bytes (that represents
1941 // the amount of reclaimable space still to be collected) as a
1942 // percentage of the current heap capacity.
1943 size_t capacity_bytes = _g1->capacity();
1944 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1945 }
1946
// Called at the end of a GC: if the occupancy threshold has been crossed
// and no marking/mixed activity is underway, request that a future pause
// initiates a concurrent marking cycle.
void G1CollectorPolicy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
}
1955
// Decides whether the next GC should be mixed: true only when the CSet
// chooser has candidate old regions AND the space reclaimable from them
// exceeds G1HeapWastePercent of the heap. The action strings are used
// purely for ergonomics logging of the decision taken.
bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
                                                const char* false_action_str) const {
  CollectionSetChooser* cset_chooser = _collectionSetChooser;
  // No candidate old regions at all — cannot do a mixed GC.
  if (cset_chooser->is_empty()) {
    ergo_verbose0(ErgoMixedGCs,
                  false_action_str,
                  ergo_format_reason("candidate old regions not available"));
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
  double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_perc <= threshold) {
    // Not enough reclaimable space left to justify a mixed GC.
    ergo_verbose4(ErgoMixedGCs,
              false_action_str,
              ergo_format_reason("reclaimable percentage not over threshold")
              ergo_format_region("candidate old regions")
              ergo_format_byte_perc("reclaimable")
              ergo_format_perc("threshold"),
              cset_chooser->remaining_regions(),
              reclaimable_bytes,
              reclaimable_perc, threshold);
    return false;
  }

  ergo_verbose4(ErgoMixedGCs,
                true_action_str,
                ergo_format_reason("candidate old regions available")
                ergo_format_region("candidate old regions")
                ergo_format_byte_perc("reclaimable")
                ergo_format_perc("threshold"),
                cset_chooser->remaining_regions(),
                reclaimable_bytes,
                reclaimable_perc, threshold);
  return true;
}
1994
1995 uint G1CollectorPolicy::calc_min_old_cset_length() const {
1996 // The min old CSet region bound is based on the maximum desired
1997 // number of mixed GCs after a cycle. I.e., even if some old regions
1998 // look expensive, we should add them to the CSet anyway to make
1999 // sure we go through the available old regions in no more than the
2000 // maximum desired number of mixed GCs.
2001 //
2002 // The calculation is based on the number of marked regions we added
2003 // to the CSet chooser in the first place, not how many remain, so
2004 // that the result is the same during all mixed GCs that follow a cycle.
2005
2006 const size_t region_num = (size_t) _collectionSetChooser->length();
2007 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
2008 size_t result = region_num / gc_num;
2009 // emulate ceiling
2010 if (result * gc_num < region_num) {
2011 result += 1;
2027 if (100 * result < region_num * perc) {
2028 result += 1;
2029 }
2030 return (uint) result;
2031 }
2032
2033
// Finalizes the young part of the collection set: takes over the
// incrementally-built CSet, converts last pause's survivor regions to
// eden tagging, and records the region lengths and RS lengths. Returns
// the pause time budget (ms) remaining for the old-region part.
double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
  double young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
  guarantee(_collection_set == NULL, "Precondition");

  // Whatever the base (non-region) pause cost is predicted to be comes
  // straight off the time budget.
  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
                "start choosing CSet",
                ergo_format_size("_pending_cards")
                ergo_format_ms("predicted base time")
                ergo_format_ms("remaining time")
                ergo_format_ms("target pause time"),
                _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());

  if (collector_state()->last_gc_was_young()) {
    _trace_young_gen_time_data.increment_young_collection_count();
  } else {
    _trace_young_gen_time_data.increment_mixed_collection_count();
  }

  // The young list is laid out with the survivor regions from the
  // previous pause appended to the RHS of the young list, i.e.
  // [Newly Young Regions ++ Survivors from last pause].

  uint survivor_region_length = young_list->survivor_length();
  uint eden_region_length = young_list->eden_length();
  init_cset_region_lengths(eden_region_length, survivor_region_length);

  HeapRegion* hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    // There is a convention that all the young regions in the CSet
    // are tagged as "eden", so we do this for the survivors here. We
    // use the special set_eden_pre_gc() as it doesn't check that the
    // region is free (which is not the case here).
    hr->set_eden_pre_gc();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  // The predicted time for the young regions also comes off the budget.
  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);

  ergo_verbose4(ErgoCSetConstruction | ErgoHigh,
                "add young regions to CSet",
                ergo_format_region("eden")
                ergo_format_region("survivors")
                ergo_format_ms("predicted young region time")
                ergo_format_ms("target pause time"),
                eden_region_length, survivor_region_length,
                _inc_cset_predicted_elapsed_time_ms,
                target_pause_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  return time_remaining_ms;
}
2108
// Finalizes the old-region part of the collection set. During mixed GCs
// it pulls candidate old regions from the CSet chooser (best first) until
// one of the stopping conditions fires: max region count reached,
// remaining reclaimable space at/below G1HeapWastePercent, or (with
// adaptive young list sizing) the predicted time exceeds the remaining
// budget once the minimum region count has been met.
void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
  double non_young_start_time_sec = os::elapsedTime();
  double predicted_old_time_ms = 0.0;


  if (!collector_state()->gcs_are_young()) {
    CollectionSetChooser* cset_chooser = _collectionSetChooser;
    cset_chooser->verify();
    const uint min_old_cset_length = calc_min_old_cset_length();
    const uint max_old_cset_length = calc_max_old_cset_length();

    // Count of regions added despite exceeding the time budget (needed
    // to reach the minimum old CSet length); reported once at the end.
    uint expensive_region_num = 0;
    bool check_time_remaining = adaptive_young_list_length();

    HeapRegion* hr = cset_chooser->peek();
    while (hr != NULL) {
      if (old_cset_region_length() >= max_old_cset_length) {
        // Added maximum number of old regions to the CSet.
        ergo_verbose2(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("old CSet region num reached max")
                      ergo_format_region("old")
                      ergo_format_region("max"),
                      old_cset_region_length(), max_old_cset_length);
        break;
      }


      // Stop adding regions if the remaining reclaimable space is
      // not above G1HeapWastePercent.
      size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
      double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
      double threshold = (double) G1HeapWastePercent;
      if (reclaimable_perc <= threshold) {
        // We've added enough old regions that the amount of uncollected
        // reclaimable space is at or below the waste threshold. Stop
        // adding old regions to the CSet.
        ergo_verbose5(ErgoCSetConstruction,
                      "finish adding old regions to CSet",
                      ergo_format_reason("reclaimable percentage not over threshold")
                      ergo_format_region("old")
                      ergo_format_region("max")
                      ergo_format_byte_perc("reclaimable")
                      ergo_format_perc("threshold"),
                      old_cset_region_length(),
                      max_old_cset_length,
                      reclaimable_bytes,
                      reclaimable_perc, threshold);
        break;
      }

      double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
      if (check_time_remaining) {
        if (predicted_time_ms > time_remaining_ms) {
          // Too expensive for the current CSet.

          if (old_cset_region_length() >= min_old_cset_length) {
            // We have added the minimum number of old regions to the CSet,
            // we are done with this CSet.
            ergo_verbose4(ErgoCSetConstruction,
                          "finish adding old regions to CSet",
                          ergo_format_reason("predicted time is too high")
                          ergo_format_ms("predicted time")
                          ergo_format_ms("remaining time")
                          ergo_format_region("old")
                          ergo_format_region("min"),
                          predicted_time_ms, time_remaining_ms,
                          old_cset_region_length(), min_old_cset_length);
            break;
          }

          // We'll add it anyway given that we haven't reached the
          // minimum number of old regions.
          expensive_region_num += 1;
        }
      } else {
        if (old_cset_region_length() >= min_old_cset_length) {
          // In the non-auto-tuning case, we'll finish adding regions
          // to the CSet if we reach the minimum.
          ergo_verbose2(ErgoCSetConstruction,
                        "finish adding old regions to CSet",
                        ergo_format_reason("old CSet region num reached min")
                        ergo_format_region("old")
                        ergo_format_region("min"),
                        old_cset_region_length(), min_old_cset_length);
          break;
        }
      }

      // We will add this region to the CSet.
      time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
      predicted_old_time_ms += predicted_time_ms;
      cset_chooser->pop(); // already have region via peek()
      _g1->old_set_remove(hr);
      add_old_region_to_cset(hr);

      hr = cset_chooser->peek();
    }
    if (hr == NULL) {
      // Chooser ran dry before any stopping condition fired.
      ergo_verbose0(ErgoCSetConstruction,
                    "finish adding old regions to CSet",
                    ergo_format_reason("candidate old regions not available"));
    }

    if (expensive_region_num > 0) {
      // We print the information once here at the end, predicated on
      // whether we added any apparently expensive regions or not, to
      // avoid generating output per region.
      ergo_verbose4(ErgoCSetConstruction,
                    "added expensive regions to CSet",
                    ergo_format_reason("old CSet region num not reached min")
                    ergo_format_region("old")
                    ergo_format_region("expensive")
                    ergo_format_region("min")
                    ergo_format_ms("remaining time"),
                    old_cset_region_length(),
                    expensive_region_num,
                    min_old_cset_length,
                    time_remaining_ms);
    }

    cset_chooser->verify();
  }

  stop_incremental_cset_building();

  ergo_verbose3(ErgoCSetConstruction,
                "finish choosing CSet",
                ergo_format_region("old")
                ergo_format_ms("predicted old region time")
                ergo_format_ms("time remaining"),
                old_cset_region_length(),
                predicted_old_time_ms, time_remaining_ms);

  double non_young_end_time_sec = os::elapsedTime();
  phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
}
2246
2247 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
2248 if(TraceYoungGenTime) {
2249 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2250 }
2251 }
2252
2253 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
2254 if(TraceYoungGenTime) {
2255 _all_yield_times_ms.add(yield_time_ms);
2256 }
2257 }
2258
// Fold the timing data of a just-completed evacuation pause into the
// TraceYoungGenTime accumulators. No-op unless -XX:+TraceYoungGenTime.
void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
  if(TraceYoungGenTime) {
    _total.add(pause_time_ms);
    // NOTE(review): parallel_other_time is computed earlier in this method
    // (from the per-phase times recorded above) - confirm against full source.
    _parallel_other.add(parallel_other_time);
    _clear_ct.add(phase_times->cur_clear_ct_time_ms());
  }
}
2284
2285 void TraceYoungGenTimeData::increment_young_collection_count() {
2286 if(TraceYoungGenTime) {
2287 ++_young_pause_num;
2288 }
2289 }
2290
2291 void TraceYoungGenTimeData::increment_mixed_collection_count() {
2292 if(TraceYoungGenTime) {
2293 ++_mixed_pause_num;
2294 }
2295 }
2296
2297 void TraceYoungGenTimeData::print_summary(const char* str,
2298 const NumberSeq* seq) const {
2299 double sum = seq->sum();
2300 gclog_or_tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2301 str, sum / 1000.0, seq->avg());
2302 }
2303
2304 void TraceYoungGenTimeData::print_summary_sd(const char* str,
2305 const NumberSeq* seq) const {
2306 print_summary(str, seq);
2307 gclog_or_tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
2308 "(num", seq->num(), seq->sd(), seq->maximum());
2309 }
2310
// Print the full TraceYoungGenTime report: overall pause statistics, the
// per-phase breakdown of evacuation pauses, and miscellaneous timings.
// No-op unless -XX:+TraceYoungGenTime. The exact output ordering and
// labels are part of the report format - do not reorder these calls.
void TraceYoungGenTimeData::print() const {
  if (!TraceYoungGenTime) {
    return;
  }

  gclog_or_tty->print_cr("ALL PAUSES");
  print_summary_sd(" Total", &_total);
  gclog_or_tty->cr();
  gclog_or_tty->cr();
  gclog_or_tty->print_cr(" Young GC Pauses: %8d", _young_pause_num);
  gclog_or_tty->print_cr(" Mixed GC Pauses: %8d", _mixed_pause_num);
  gclog_or_tty->cr();

  gclog_or_tty->print_cr("EVACUATION PAUSES");

  // Nothing recorded yet - print a placeholder instead of empty stats.
  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
    gclog_or_tty->print_cr("none");
  } else {
    print_summary_sd(" Evacuation Pauses", &_total);
    print_summary(" Root Region Scan Wait", &_root_region_scan_wait);
    print_summary(" Parallel Time", &_parallel);
    print_summary(" Ext Root Scanning", &_ext_root_scan);
    print_summary(" SATB Filtering", &_satb_filtering);
    print_summary(" Update RS", &_update_rs);
    print_summary(" Scan RS", &_scan_rs);
    print_summary(" Object Copy", &_obj_copy);
    print_summary(" Termination", &_termination);
    print_summary(" Parallel Other", &_parallel_other);
    print_summary(" Clear CT", &_clear_ct);
    print_summary(" Other", &_other);
  }
  gclog_or_tty->cr();

  gclog_or_tty->print_cr("MISC");
  print_summary_sd(" Stop World", &_all_stop_world_times_ms);
  print_summary_sd(" Yields", &_all_yield_times_ms);
}
2348
2349 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2350 if (TraceOldGenTime) {
2351 _all_full_gc_times.add(full_gc_time_ms);
2352 }
2353 }
2354
// Print full-GC timing statistics. No-op unless -XX:+TraceOldGenTime was
// specified, and silent when no full GC has been recorded.
void TraceOldGenTimeData::print() const {
  if (!TraceOldGenTime) {
    return;
  }

  if (_all_full_gc_times.num() > 0) {
    gclog_or_tty->print("\n%4d full_gcs: total time = %8.2f s",
      _all_full_gc_times.num(),
      _all_full_gc_times.sum() / 1000.0);
    gclog_or_tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
    gclog_or_tty->print_cr(" [std. dev = %8.2f ms, max = %8.2f ms]",
      _all_full_gc_times.sd(),
      _all_full_gc_times.maximum());
  }
}
|
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/concurrentG1Refine.hpp"
27 #include "gc/g1/concurrentMark.hpp"
28 #include "gc/g1/concurrentMarkThread.inline.hpp"
29 #include "gc/g1/g1CollectedHeap.inline.hpp"
30 #include "gc/g1/g1CollectorPolicy.hpp"
31 #include "gc/g1/g1GCPhaseTimes.hpp"
32 #include "gc/g1/heapRegion.inline.hpp"
33 #include "gc/g1/heapRegionRemSet.hpp"
34 #include "gc/shared/gcPolicyCounters.hpp"
35 #include "logging/log.hpp"
36 #include "runtime/arguments.hpp"
37 #include "runtime/java.hpp"
38 #include "runtime/mutexLocker.hpp"
39 #include "utilities/debug.hpp"
40
41 // Different defaults for different number of GC threads
42 // They were chosen by running GCOld and SPECjbb on debris with different
43 // numbers of GC threads and choosing them based on the results
44
45 // all the same
// Seed value for the RS-length prediction difference, indexed by
// MIN2(parallel GC threads - 1, 7) in the constructor; identical for all
// thread counts.
static double rs_length_diff_defaults[] = {
  0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
};
49
// Seed value for the predicted cost (ms) of processing one card, indexed by
// MIN2(parallel GC threads - 1, 7) in the constructor; decreases as more GC
// threads share the work.
static double cost_per_card_ms_defaults[] = {
  0.01, 0.005, 0.005, 0.003, 0.003, 0.002, 0.002, 0.0015
};
53
54 // all the same
55 static double young_cards_per_entry_ratio_defaults[] = {
101 _cost_per_byte_ms_during_cm_seq(new TruncatedSeq(TruncatedSeqLength)),
102 _constant_other_time_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
103 _young_other_cost_per_region_ms_seq(new TruncatedSeq(TruncatedSeqLength)),
104 _non_young_other_cost_per_region_ms_seq(
105 new TruncatedSeq(TruncatedSeqLength)),
106
107 _pending_cards_seq(new TruncatedSeq(TruncatedSeqLength)),
108 _rs_lengths_seq(new TruncatedSeq(TruncatedSeqLength)),
109
110 _pause_time_target_ms((double) MaxGCPauseMillis),
111
112 _recent_prev_end_times_for_all_gcs_sec(
113 new TruncatedSeq(NumPrevPausesForHeuristics)),
114
115 _recent_avg_pause_time_ratio(0.0),
116 _rs_lengths_prediction(0),
117 _max_survivor_regions(0),
118
119 _eden_used_bytes_before_gc(0),
120 _survivor_used_bytes_before_gc(0),
121 _old_used_bytes_before_gc(0),
122 _heap_used_bytes_before_gc(0),
123 _metaspace_used_bytes_before_gc(0),
124 _eden_capacity_bytes_before_gc(0),
125 _heap_capacity_bytes_before_gc(0),
126
127 _eden_cset_region_length(0),
128 _survivor_cset_region_length(0),
129 _old_cset_region_length(0),
130
131 _collection_set(NULL),
132 _collection_set_bytes_used_before(0),
133
134 // Incremental CSet attributes
135 _inc_cset_build_state(Inactive),
136 _inc_cset_head(NULL),
137 _inc_cset_tail(NULL),
138 _inc_cset_bytes_used_before(0),
139 _inc_cset_max_finger(NULL),
140 _inc_cset_recorded_rs_lengths(0),
141 _inc_cset_recorded_rs_lengths_diffs(0),
154 // indirectly use it through this object passed to their constructor.
155 _short_lived_surv_rate_group =
156 new SurvRateGroup(&_predictor, "Short Lived", G1YoungSurvRateNumRegionsSummary);
157 _survivor_surv_rate_group =
158 new SurvRateGroup(&_predictor, "Survivor", G1YoungSurvRateNumRegionsSummary);
159
160 // Set up the region size and associated fields. Given that the
161 // policy is created before the heap, we have to set this up here,
162 // so it's done as soon as possible.
163
164 // It would have been natural to pass initial_heap_byte_size() and
165 // max_heap_byte_size() to setup_heap_region_size() but those have
166 // not been set up at this point since they should be aligned with
167 // the region size. So, there is a circular dependency here. We base
168 // the region size on the heap size, but the heap size should be
169 // aligned with the region size. To get around this we use the
170 // unaligned values for the heap.
171 HeapRegion::setup_heap_region_size(InitialHeapSize, MaxHeapSize);
172 HeapRegionRemSet::setup_remset_size();
173
174 _recent_prev_end_times_for_all_gcs_sec->add(os::elapsedTime());
175 _prev_collection_pause_end_ms = os::elapsedTime() * 1000.0;
176
177 _phase_times = new G1GCPhaseTimes(_parallel_gc_threads);
178
179 int index = MIN2(_parallel_gc_threads - 1, 7);
180
181 _rs_length_diff_seq->add(rs_length_diff_defaults[index]);
182 _cost_per_card_ms_seq->add(cost_per_card_ms_defaults[index]);
183 _cost_scan_hcc_seq->add(0.0);
184 _young_cards_per_entry_ratio_seq->add(
185 young_cards_per_entry_ratio_defaults[index]);
186 _cost_per_entry_ms_seq->add(cost_per_entry_ms_defaults[index]);
187 _cost_per_byte_ms_seq->add(cost_per_byte_ms_defaults[index]);
188 _constant_other_time_ms_seq->add(constant_other_time_ms_defaults[index]);
189 _young_other_cost_per_region_ms_seq->add(
190 young_other_cost_per_region_ms_defaults[index]);
191 _non_young_other_cost_per_region_ms_seq->add(
192 non_young_other_cost_per_region_ms_defaults[index]);
193
744 HeapRegion* head = _g1->young_list()->first_region();
745 return
746 verify_young_ages(head, _short_lived_surv_rate_group);
747 // also call verify_young_ages on any additional surv rate groups
748 }
749
750 bool
751 G1CollectorPolicy::verify_young_ages(HeapRegion* head,
752 SurvRateGroup *surv_rate_group) {
753 guarantee( surv_rate_group != NULL, "pre-condition" );
754
755 const char* name = surv_rate_group->name();
756 bool ret = true;
757 int prev_age = -1;
758
759 for (HeapRegion* curr = head;
760 curr != NULL;
761 curr = curr->get_next_young_region()) {
762 SurvRateGroup* group = curr->surv_rate_group();
763 if (group == NULL && !curr->is_survivor()) {
764 log_info(gc, verify)("## %s: encountered NULL surv_rate_group", name);
765 ret = false;
766 }
767
768 if (surv_rate_group == group) {
769 int age = curr->age_in_surv_rate_group();
770
771 if (age < 0) {
772 log_info(gc, verify)("## %s: encountered negative age", name);
773 ret = false;
774 }
775
776 if (age <= prev_age) {
777 log_info(gc, verify)("## %s: region ages are not strictly increasing (%d, %d)", name, age, prev_age);
778 ret = false;
779 }
780 prev_age = age;
781 }
782 }
783
784 return ret;
785 }
786 #endif // PRODUCT
787
// Mark the beginning of a full (stop-the-world) collection: timestamp it,
// snapshot the before-GC heap sizes, and flip the collector state.
void G1CollectorPolicy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  record_heap_size_info_at_start(true /* full */);
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_full_collection(true);
}
794
795 void G1CollectorPolicy::record_full_collection_end() {
796 // Consider this like a collection pause for the purposes of allocation
797 // since last pause.
// The portion of the pause's "other" time not attributed to per-region
// young or non-young bookkeeping - i.e. the fixed per-pause overhead.
double G1CollectorPolicy::constant_other_time_ms(double pause_time_ms) const {
  return other_time_ms(pause_time_ms) - young_other_time_ms() - non_young_other_time_ms();
}
932
// True while a concurrent mark cycle is in progress, or when last_young_gc()
// is set (presumably the final young-only GC before mixed GCs - confirm),
// i.e. when a mixed-collection phase is running or imminent.
bool G1CollectorPolicy::about_to_start_mixed_phase() const {
  return _g1->concurrent_mark()->cmThread()->during_cycle() || collector_state()->last_young_gc();
}
936
937 bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
938 if (about_to_start_mixed_phase()) {
939 return false;
940 }
941
942 size_t marking_initiating_used_threshold =
943 (_g1->capacity() / 100) * InitiatingHeapOccupancyPercent;
944 size_t cur_used_bytes = _g1->non_young_capacity_bytes();
945 size_t alloc_byte_size = alloc_word_size * HeapWordSize;
946
947 if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
948 if (collector_state()->gcs_are_young() && !collector_state()->last_young_gc()) {
949 log_debug(gc, ergo)("Request concurrent cycle initiation (occupancy higher than threshold)"
950 "occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (" UINTX_FORMAT "%%) source: %s",
951 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, InitiatingHeapOccupancyPercent, source);
952 return true;
953 } else {
954 log_debug(gc, ergo)("Do not request concurrent cycle initiation (still doing mixed collections)"
955 "occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (" UINTX_FORMAT "%%) source: %s",
956 cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, InitiatingHeapOccupancyPercent, source); }
957 }
958
959 return false;
960 }
961
962 // Anything below that is considered to be zero
963 #define MIN_TIMER_GRANULARITY 0.0000001
964
// Bookkeeping at the end of an evacuation pause: update prediction
// sequences and tracing data (when the pause succeeded), refresh young-list
// sizing, and retune concurrent refinement.
void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned) {
  double end_time_sec = os::elapsedTime();
  assert(_cur_collection_pause_used_regions_at_start >= cset_region_length(),
         "otherwise, the subtraction below does not make sense");
  size_t rs_size =
    _cur_collection_pause_used_regions_at_start - cset_region_length();
  size_t cur_used_bytes = _g1->used();
  assert(cur_used_bytes == _g1->recalculate_used(), "It should!");
  bool last_pause_included_initial_mark = false;
  // Don't pollute the predictors with data from a failed evacuation.
  bool update_stats = !_g1->evacuation_failed();

  NOT_PRODUCT(_short_lived_surv_rate_group->print());

  last_pause_included_initial_mark = collector_state()->during_initial_mark_pause();
  if (last_pause_included_initial_mark) {
    record_concurrent_mark_init_end(0.0);
  } else {
    maybe_start_marking();
  }

  _mmu_tracker->add_pause(end_time_sec - pause_time_ms/1000.0, end_time_sec);

  if (update_stats) {
    _trace_young_gen_time_data.record_end_collection(pause_time_ms, phase_times());
    // this is where we update the allocation rate of the application
    double app_time_ms =
      (phase_times()->cur_collection_start_sec() * 1000.0 - _prev_collection_pause_end_ms);
    if (app_time_ms < MIN_TIMER_GRANULARITY) {
      // This usually happens due to the timer not having the required
      // granularity. Some Linuxes are the usual culprits.
      // We'll just set it to something (arbitrarily) small.
      app_time_ms = 1.0;
    }

    _constant_other_time_ms_seq->add(constant_other_time_ms(pause_time_ms));

    _pending_cards_seq->add((double) _pending_cards);
    _rs_lengths_seq->add((double) _max_rs_lengths);
  }

  // NOTE(review): new_in_marking_window / new_in_marking_window_im are
  // computed earlier in this method - confirm against the full source.
  collector_state()->set_in_marking_window(new_in_marking_window);
  collector_state()->set_in_marking_window_im(new_in_marking_window_im);
  _free_regions_at_end_of_collection = _g1->num_free_regions();
  update_young_list_max_and_target_length();
  update_rs_lengths_prediction();

  // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
  double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;

  double scan_hcc_time_ms = average_time_ms(G1GCPhaseTimes::ScanHCC);

  if (update_rs_time_goal_ms < scan_hcc_time_ms) {
    // NOTE(review): these two literals concatenate without a separator
    // ("goal).Update RS"); a space is likely missing here.
    log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
                                "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
                                update_rs_time_goal_ms, scan_hcc_time_ms);

    update_rs_time_goal_ms = 0;
  } else {
    update_rs_time_goal_ms -= scan_hcc_time_ms;
  }
  adjust_concurrent_refinement(average_time_ms(G1GCPhaseTimes::UpdateRS) - scan_hcc_time_ms,
                               phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
                               update_rs_time_goal_ms);

  _collectionSetChooser->verify();
}
1165
1166 #define EXT_SIZE_FORMAT "%.1f%s"
1167 #define EXT_SIZE_PARAMS(bytes) \
1168 byte_size_in_proper_unit((double)(bytes)), \
1169 proper_unit_for_byte_size((bytes))
1170
1171 void G1CollectorPolicy::record_heap_size_info_at_start(bool full) {
1172 YoungList* young_list = _g1->young_list();
1173 _eden_used_bytes_before_gc = young_list->eden_used_bytes();
1174 _survivor_used_bytes_before_gc = young_list->survivor_used_bytes();
1175 _heap_capacity_bytes_before_gc = _g1->capacity();
1176 _heap_used_bytes_before_gc = _g1->used();
1177 _old_used_bytes_before_gc = _heap_used_bytes_before_gc - _survivor_used_bytes_before_gc - _eden_used_bytes_before_gc;
1178 _cur_collection_pause_used_regions_at_start = _g1->num_used_regions();
1179
1180 _eden_capacity_bytes_before_gc =
1181 (_young_list_target_length * HeapRegion::GrainBytes) - _survivor_used_bytes_before_gc;
1182
1183 _metaspace_used_bytes_before_gc = MetaspaceAux::used_bytes();
1184 }
1185
1186 void G1CollectorPolicy::print_detailed_heap_transition() const {
1187 YoungList* young_list = _g1->young_list();
1188
1189 size_t eden_used_bytes_after_gc = young_list->eden_used_bytes();
1190 size_t survivor_used_bytes_after_gc = young_list->survivor_used_bytes();
1191 size_t old_used_bytes_after_gc = _g1->used() - eden_used_bytes_after_gc - survivor_used_bytes_after_gc;
1192
1193 size_t heap_capacity_bytes_after_gc = _g1->capacity();
1194 size_t eden_capacity_bytes_after_gc =
1195 (_young_list_target_length * HeapRegion::GrainBytes) - survivor_used_bytes_after_gc;
1196 size_t survivor_capacity_bytes_after_gc = _max_survivor_regions * HeapRegion::GrainBytes;
1197
1198 log_info(gc, heap)("Eden: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1199 _eden_used_bytes_before_gc / K, eden_used_bytes_after_gc /K, eden_capacity_bytes_after_gc /K);
1200
1201 log_info(gc, heap)("Survivor: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1202 _survivor_used_bytes_before_gc / K, survivor_used_bytes_after_gc /K, survivor_capacity_bytes_after_gc /K);
1203
1204 log_info(gc, heap)("Old: " SIZE_FORMAT "K->" SIZE_FORMAT "K(" SIZE_FORMAT "K)",
1205 _old_used_bytes_before_gc / K, old_used_bytes_after_gc /K, heap_capacity_bytes_after_gc /K);
1206
1207 MetaspaceAux::print_metaspace_change(_metaspace_used_bytes_before_gc);
1208 }
1209
// Print the per-phase timing breakdown for the just-completed pause.
void G1CollectorPolicy::print_phases(double pause_time_sec) {
  phase_times()->print(pause_time_sec);
}
1213
1214 void G1CollectorPolicy::adjust_concurrent_refinement(double update_rs_time,
1215 double update_rs_processed_buffers,
1216 double goal_ms) {
1217 DirtyCardQueueSet& dcqs = JavaThread::dirty_card_queue_set();
1218 ConcurrentG1Refine *cg1r = G1CollectedHeap::heap()->concurrent_g1_refine();
1219
1220 if (G1UseAdaptiveConcRefinement) {
1221 const int k_gy = 3, k_gr = 6;
1222 const double inc_k = 1.1, dec_k = 0.9;
1223
1224 int g = cg1r->green_zone();
1225 if (update_rs_time > goal_ms) {
1226 g = (int)(g * dec_k); // Can become 0, that's OK. That would mean a mutator-only processing.
1227 } else {
1442
1443 size_t G1CollectorPolicy::expansion_amount() const {
1444 double recent_gc_overhead = recent_avg_pause_time_ratio() * 100.0;
1445 double threshold = _gc_overhead_perc;
1446 if (recent_gc_overhead > threshold) {
1447 // We will double the existing space, or take
1448 // G1ExpandByPercentOfAvailable % of the available expansion
1449 // space, whichever is smaller, bounded below by a minimum
1450 // expansion (unless that's all that's left.)
1451 const size_t min_expand_bytes = 1*M;
1452 size_t reserved_bytes = _g1->max_capacity();
1453 size_t committed_bytes = _g1->capacity();
1454 size_t uncommitted_bytes = reserved_bytes - committed_bytes;
1455 size_t expand_bytes;
1456 size_t expand_bytes_via_pct =
1457 uncommitted_bytes * G1ExpandByPercentOfAvailable / 100;
1458 expand_bytes = MIN2(expand_bytes_via_pct, committed_bytes);
1459 expand_bytes = MAX2(expand_bytes, min_expand_bytes);
1460 expand_bytes = MIN2(expand_bytes, uncommitted_bytes);
1461
1462 log_debug(gc, ergo, heap)("Attempt heap expansion (recent GC overhead higher than threshold after GC) "
1463 "recent GC overhead: %1.2f %% threshold: %1.2f %% uncommitted: " SIZE_FORMAT "B calculated expansion amount: " SIZE_FORMAT "B (" INTX_FORMAT "%%)",
1464 recent_gc_overhead, threshold, uncommitted_bytes, expand_bytes_via_pct, G1ExpandByPercentOfAvailable);
1465
1466 return expand_bytes;
1467 } else {
1468 return 0;
1469 }
1470 }
1471
// Dump the accumulated young- and old-gen tracing reports (each is gated
// internally on its TraceYoungGenTime / TraceOldGenTime flag).
void G1CollectorPolicy::print_tracing_info() const {
  _trace_young_gen_time_data.print();
  _trace_old_gen_time_data.print();
}
1476
// Debug-only: print the survival-rate summary for the short-lived group.
void G1CollectorPolicy::print_yg_surv_rate_info() const {
#ifndef PRODUCT
  _short_lived_surv_rate_group->print_surv_rate_summary();
  // add this call for any other surv rate groups
#endif // PRODUCT
}
1483
1484 bool G1CollectorPolicy::is_young_list_full() const {
1509 }
1510
1511 // Calculates survivor space parameters.
1512 void G1CollectorPolicy::update_survivors_policy() {
1513 double max_survivor_regions_d =
1514 (double) _young_list_target_length / (double) SurvivorRatio;
1515 // We use ceiling so that if max_survivor_regions_d is > 0.0 (but
1516 // smaller than 1.0) we'll get 1.
1517 _max_survivor_regions = (uint) ceil(max_survivor_regions_d);
1518
1519 _tenuring_threshold = _survivors_age_table.compute_tenuring_threshold(
1520 HeapRegion::GrainWords * _max_survivor_regions, counters());
1521 }
1522
1523 bool G1CollectorPolicy::force_initial_mark_if_outside_cycle(GCCause::Cause gc_cause) {
1524 // We actually check whether we are marking here and not if we are in a
1525 // reclamation phase. This means that we will schedule a concurrent mark
1526 // even while we are still in the process of reclaiming memory.
1527 bool during_cycle = _g1->concurrent_mark()->cmThread()->during_cycle();
1528 if (!during_cycle) {
1529 log_debug(gc, ergo)("Request concurrent cycle initiation (requested by GC cause). GC cause: %s", GCCause::to_string(gc_cause));
1530 collector_state()->set_initiate_conc_mark_if_possible(true);
1531 return true;
1532 } else {
1533 log_debug(gc, ergo)("Do not request concurrent cycle initiation (concurrent cycle already in progress). GC cause: %s", GCCause::to_string(gc_cause));
1534 return false;
1535 }
1536 }
1537
1538 void G1CollectorPolicy::decide_on_conc_mark_initiation() {
1539 // We are about to decide on whether this pause will be an
1540 // initial-mark pause.
1541
1542 // First, collector_state()->during_initial_mark_pause() should not be already set. We
1543 // will set it here if we have to. However, it should be cleared by
1544 // the end of the pause (it's only set for the duration of an
1545 // initial-mark pause).
1546 assert(!collector_state()->during_initial_mark_pause(), "pre-condition");
1547
1548 if (collector_state()->initiate_conc_mark_if_possible()) {
1549 // We had noticed on a previous pause that the heap occupancy has
1550 // gone over the initiating threshold and we should start a
1551 // concurrent marking cycle. So we might initiate one.
1552
1553 if (!about_to_start_mixed_phase() && collector_state()->gcs_are_young()) {
1554 // Initiate a new initial mark only if there is no marking or reclamation going
1555 // on.
1556
1557 collector_state()->set_during_initial_mark_pause(true);
1558 // And we can now clear initiate_conc_mark_if_possible() as
1559 // we've already acted on it.
1560 collector_state()->set_initiate_conc_mark_if_possible(false);
1561
1562 log_debug(gc, ergo)("Initiate concurrent cycle (concurrent cycle initiation requested)");
1563 } else {
1564 // The concurrent marking thread is still finishing up the
1565 // previous cycle. If we start one right now the two cycles
1566 // overlap. In particular, the concurrent marking thread might
1567 // be in the process of clearing the next marking bitmap (which
1568 // we will use for the next cycle if we start one). Starting a
1569 // cycle now will be bad given that parts of the marking
1570 // information might get cleared by the marking thread. And we
1571 // cannot wait for the marking thread to finish the cycle as it
1572 // periodically yields while clearing the next marking bitmap
1573 // and, if it's in a yield point, it's waiting for us to
1574 // finish. So, at this point we will not start a cycle and we'll
1575 // let the concurrent marking thread complete the last one.
1576 log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
1577 }
1578 }
1579 }
1580
1581 class ParKnownGarbageHRClosure: public HeapRegionClosure {
1582 G1CollectedHeap* _g1h;
1583 CSetChooserParUpdater _cset_updater;
1584
1585 public:
1586 ParKnownGarbageHRClosure(CollectionSetChooser* hrSorted,
1587 uint chunk_size) :
1588 _g1h(G1CollectedHeap::heap()),
1589 _cset_updater(hrSorted, true /* parallel */, chunk_size) { }
1590
1591 bool doHeapRegion(HeapRegion* r) {
1592 // Do we have any marking information for this region?
1593 if (r->is_marked()) {
1594 // We will skip any region that's currently used as an old GC
1595 // alloc region (we should not consider those for collection
1596 // before we fill them up).
1850 // Returns the given amount of reclaimable bytes (that represents
1851 // the amount of reclaimable space still to be collected) as a
1852 // percentage of the current heap capacity.
1853 size_t capacity_bytes = _g1->capacity();
1854 return (double) reclaimable_bytes * 100.0 / (double) capacity_bytes;
1855 }
1856
// At the end of a GC, request a concurrent marking cycle if the occupancy
// trigger fires.
void G1CollectorPolicy::maybe_start_marking() {
  if (need_to_start_conc_mark("end of GC")) {
    // Note: this might have already been set, if during the last
    // pause we decided to start a cycle but at the beginning of
    // this pause we decided to postpone it. That's OK.
    collector_state()->set_initiate_conc_mark_if_possible(true);
  }
}
1865
1866 bool G1CollectorPolicy::next_gc_should_be_mixed(const char* true_action_str,
1867 const char* false_action_str) const {
1868 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1869 if (cset_chooser->is_empty()) {
1870 log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1871 return false;
1872 }
1873
1874 // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1875 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
1876 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
1877 double threshold = (double) G1HeapWastePercent;
1878 if (reclaimable_perc <= threshold) {
1879 log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1880 false_action_str, cset_chooser->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
1881 return false;
1882 }
1883
1884 log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1885 true_action_str, cset_chooser->remaining_regions(), reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
1886 return true;
1887 }
1888
1889 uint G1CollectorPolicy::calc_min_old_cset_length() const {
1890 // The min old CSet region bound is based on the maximum desired
1891 // number of mixed GCs after a cycle. I.e., even if some old regions
1892 // look expensive, we should add them to the CSet anyway to make
1893 // sure we go through the available old regions in no more than the
1894 // maximum desired number of mixed GCs.
1895 //
1896 // The calculation is based on the number of marked regions we added
1897 // to the CSet chooser in the first place, not how many remain, so
1898 // that the result is the same during all mixed GCs that follow a cycle.
1899
1900 const size_t region_num = (size_t) _collectionSetChooser->length();
1901 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1902 size_t result = region_num / gc_num;
1903 // emulate ceiling
1904 if (result * gc_num < region_num) {
1905 result += 1;
1921 if (100 * result < region_num * perc) {
1922 result += 1;
1923 }
1924 return (uint) result;
1925 }
1926
1927
// Build the young part of the collection set from the incremental CSet and
// the survivor list, budget its predicted cost against
// target_pause_time_ms, and return the pause time (ms) still available for
// adding old regions in finalize_old_cset_part().
double G1CollectorPolicy::finalize_young_cset_part(double target_pause_time_ms) {
  double young_start_time_sec = os::elapsedTime();

  YoungList* young_list = _g1->young_list();
  finalize_incremental_cset_building();

  guarantee(target_pause_time_ms > 0.0,
            "target_pause_time_ms = %1.6lf should be positive", target_pause_time_ms);
  guarantee(_collection_set == NULL, "Precondition");

  // Budget: start from the target and subtract the predicted fixed cost.
  double base_time_ms = predict_base_elapsed_time_ms(_pending_cards);
  double time_remaining_ms = MAX2(target_pause_time_ms - base_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Start choosing CSet. pending cards: " SIZE_FORMAT " predicted base time: %1.2fms remaining time: %1.2fms target pause time: %1.2fms",
                            _pending_cards, base_time_ms, time_remaining_ms, target_pause_time_ms);

  collector_state()->set_last_gc_was_young(collector_state()->gcs_are_young());

  if (collector_state()->last_gc_was_young()) {
    _trace_young_gen_time_data.increment_young_collection_count();
  } else {
    _trace_young_gen_time_data.increment_mixed_collection_count();
  }

  // The young list is laid out with the survivor regions from the previous
  // pause appended to the RHS of the young list, i.e.
  //   [Newly Young Regions ++ Survivors from last pause].

  uint survivor_region_length = young_list->survivor_length();
  uint eden_region_length = young_list->eden_length();
  init_cset_region_lengths(eden_region_length, survivor_region_length);

  HeapRegion* hr = young_list->first_survivor_region();
  while (hr != NULL) {
    assert(hr->is_survivor(), "badly formed young list");
    // There is a convention that all the young regions in the CSet
    // are tagged as "eden", so we do this for the survivors here. We
    // use the special set_eden_pre_gc() as it doesn't check that the
    // region is free (which is not the case here).
    hr->set_eden_pre_gc();
    hr = hr->get_next_young_region();
  }

  // Clear the fields that point to the survivor list - they are all young now.
  young_list->clear_survivors();

  _collection_set = _inc_cset_head;
  _collection_set_bytes_used_before = _inc_cset_bytes_used_before;
  // Charge the predicted young-region time against the remaining budget.
  time_remaining_ms = MAX2(time_remaining_ms - _inc_cset_predicted_elapsed_time_ms, 0.0);

  log_trace(gc, ergo, cset)("Add young regions to CSet. eden: %u regions, survivors: %u regions, predicted young region time: %1.2fms, target pause time: %1.2fms",
                            eden_region_length, survivor_region_length, _inc_cset_predicted_elapsed_time_ms, target_pause_time_ms);

  // The number of recorded young regions is the incremental
  // collection set's current size
  set_recorded_rs_lengths(_inc_cset_recorded_rs_lengths);

  double young_end_time_sec = os::elapsedTime();
  phase_times()->record_young_cset_choice_time_ms((young_end_time_sec - young_start_time_sec) * 1000.0);

  return time_remaining_ms;
}
1990
1991 void G1CollectorPolicy::finalize_old_cset_part(double time_remaining_ms) {
1992 double non_young_start_time_sec = os::elapsedTime();
1993 double predicted_old_time_ms = 0.0;
1994
1995
1996 if (!collector_state()->gcs_are_young()) {
1997 CollectionSetChooser* cset_chooser = _collectionSetChooser;
1998 cset_chooser->verify();
1999 const uint min_old_cset_length = calc_min_old_cset_length();
2000 const uint max_old_cset_length = calc_max_old_cset_length();
2001
2002 uint expensive_region_num = 0;
2003 bool check_time_remaining = adaptive_young_list_length();
2004
2005 HeapRegion* hr = cset_chooser->peek();
2006 while (hr != NULL) {
2007 if (old_cset_region_length() >= max_old_cset_length) {
2008 // Added maximum number of old regions to the CSet.
2009 log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached max). old %u regions, max %u regions",
2010 old_cset_region_length(), max_old_cset_length);
2011 break;
2012 }
2013
2014
2015 // Stop adding regions if the remaining reclaimable space is
2016 // not above G1HeapWastePercent.
2017 size_t reclaimable_bytes = cset_chooser->remaining_reclaimable_bytes();
2018 double reclaimable_perc = reclaimable_bytes_perc(reclaimable_bytes);
2019 double threshold = (double) G1HeapWastePercent;
2020 if (reclaimable_perc <= threshold) {
2021 // We've added enough old regions that the amount of uncollected
2022 // reclaimable space is at or below the waste threshold. Stop
2023 // adding old regions to the CSet.
2024 log_debug(gc, ergo, cset)("Finish adding old regions to CSet (reclaimable percentage not over threshold). "
2025 "old %u regions, max %u regions, reclaimable: " SIZE_FORMAT "B (%1.2f%%) threshold: " UINTX_FORMAT "%%",
2026 old_cset_region_length(), max_old_cset_length, reclaimable_bytes, reclaimable_perc, G1HeapWastePercent);
2027 break;
2028 }
2029
2030 double predicted_time_ms = predict_region_elapsed_time_ms(hr, collector_state()->gcs_are_young());
2031 if (check_time_remaining) {
2032 if (predicted_time_ms > time_remaining_ms) {
2033 // Too expensive for the current CSet.
2034
2035 if (old_cset_region_length() >= min_old_cset_length) {
2036 // We have added the minimum number of old regions to the CSet,
2037 // we are done with this CSet.
2038 log_debug(gc, ergo, cset)("Finish adding old regions to CSet (predicted time is too high). "
2039 "predicted time: %1.2fms, remaining time: %1.2fms old %u regions, min %u regions",
2040 predicted_time_ms, time_remaining_ms, old_cset_region_length(), min_old_cset_length);
2041 break;
2042 }
2043
2044 // We'll add it anyway given that we haven't reached the
2045 // minimum number of old regions.
2046 expensive_region_num += 1;
2047 }
2048 } else {
2049 if (old_cset_region_length() >= min_old_cset_length) {
2050 // In the non-auto-tuning case, we'll finish adding regions
2051 // to the CSet if we reach the minimum.
2052
2053 log_debug(gc, ergo, cset)("Finish adding old regions to CSet (old CSet region num reached min). old %u regions, min %u regions",
2054 old_cset_region_length(), min_old_cset_length);
2055 break;
2056 }
2057 }
2058
2059 // We will add this region to the CSet.
2060 time_remaining_ms = MAX2(time_remaining_ms - predicted_time_ms, 0.0);
2061 predicted_old_time_ms += predicted_time_ms;
2062 cset_chooser->pop(); // already have region via peek()
2063 _g1->old_set_remove(hr);
2064 add_old_region_to_cset(hr);
2065
2066 hr = cset_chooser->peek();
2067 }
2068 if (hr == NULL) {
2069 log_debug(gc, ergo, cset)("Finish adding old regions to CSet (candidate old regions not available)");
2070 }
2071
2072 if (expensive_region_num > 0) {
2073 // We print the information once here at the end, predicated on
2074 // whether we added any apparently expensive regions or not, to
2075 // avoid generating output per region.
2076 log_debug(gc, ergo, cset)("Added expensive regions to CSet (old CSet region num not reached min)."
2077 "old %u regions, expensive: %u regions, min %u regions, remaining time: %1.2fms",
2078 old_cset_region_length(), expensive_region_num, min_old_cset_length, time_remaining_ms);
2079 }
2080
2081 cset_chooser->verify();
2082 }
2083
2084 stop_incremental_cset_building();
2085
2086 log_debug(gc, ergo, cset)("Finish choosing CSet. old %u regions, predicted old region time: %1.2fms, time remaining: %1.2f",
2087 old_cset_region_length(), predicted_old_time_ms, time_remaining_ms);
2088
2089 double non_young_end_time_sec = os::elapsedTime();
2090 phase_times()->record_non_young_cset_choice_time_ms((non_young_end_time_sec - non_young_start_time_sec) * 1000.0);
2091 }
2092
2093 void TraceYoungGenTimeData::record_start_collection(double time_to_stop_the_world_ms) {
2094 if(TraceYoungGenTime) {
2095 _all_stop_world_times_ms.add(time_to_stop_the_world_ms);
2096 }
2097 }
2098
2099 void TraceYoungGenTimeData::record_yield_time(double yield_time_ms) {
2100 if(TraceYoungGenTime) {
2101 _all_yield_times_ms.add(yield_time_ms);
2102 }
2103 }
2104
// Accumulate per-pause timing data at the end of an evacuation pause.
// Only tracked when TraceYoungGenTime is enabled.
// NOTE(review): part of this method's body is not visible in this chunk;
// parallel_other_time is computed in the elided portion (presumably the
// parallel time not attributed to any tracked sub-phase) — confirm
// against the full source before editing.
void TraceYoungGenTimeData::record_end_collection(double pause_time_ms, G1GCPhaseTimes* phase_times) {
  if(TraceYoungGenTime) {
    // Total wall time of the pause.
    _total.add(pause_time_ms);
    _parallel_other.add(parallel_other_time);
    // Card-table clearing time as reported by the phase times.
    _clear_ct.add(phase_times->cur_clear_ct_time_ms());
  }
}
2130
2131 void TraceYoungGenTimeData::increment_young_collection_count() {
2132 if(TraceYoungGenTime) {
2133 ++_young_pause_num;
2134 }
2135 }
2136
2137 void TraceYoungGenTimeData::increment_mixed_collection_count() {
2138 if(TraceYoungGenTime) {
2139 ++_mixed_pause_num;
2140 }
2141 }
2142
2143 void TraceYoungGenTimeData::print_summary(const char* str,
2144 const NumberSeq* seq) const {
2145 double sum = seq->sum();
2146 tty->print_cr("%-27s = %8.2lf s (avg = %8.2lf ms)",
2147 str, sum / 1000.0, seq->avg());
2148 }
2149
// Print the summary line for a sequence followed by a second line with
// the sample count, standard deviation and maximum. The "%45s" field
// width right-pads the "(num" label so the values line up under the
// columns produced by print_summary above.
void TraceYoungGenTimeData::print_summary_sd(const char* str,
                                             const NumberSeq* seq) const {
  print_summary(str, seq);
  tty->print_cr("%45s = %5d, std dev = %8.2lf ms, max = %8.2lf ms)",
                "(num", seq->num(), seq->sd(), seq->maximum());
}
2156
// Print the full young-generation timing report: overall pause totals,
// pause counts by type, the per-phase evacuation breakdown, and the
// stop-world/yield statistics. The leading spaces in each label encode
// the indentation level of the report; they must stay exactly as-is for
// the columns to line up.
void TraceYoungGenTimeData::print() const {
  // No-op unless the TraceYoungGenTime flag was set.
  if (!TraceYoungGenTime) {
    return;
  }

  tty->print_cr("ALL PAUSES");
  print_summary_sd("   Total", &_total);
  tty->cr();
  tty->cr();
  tty->print_cr("   Young GC Pauses: %8d", _young_pause_num);
  tty->print_cr("   Mixed GC Pauses: %8d", _mixed_pause_num);
  tty->cr();

  tty->print_cr("EVACUATION PAUSES");

  if (_young_pause_num == 0 && _mixed_pause_num == 0) {
    // No evacuation pauses happened at all; skip the breakdown.
    tty->print_cr("none");
  } else {
    print_summary_sd("   Evacuation Pauses", &_total);
    print_summary("      Root Region Scan Wait", &_root_region_scan_wait);
    print_summary("      Parallel Time", &_parallel);
    print_summary("         Ext Root Scanning", &_ext_root_scan);
    print_summary("         SATB Filtering", &_satb_filtering);
    print_summary("         Update RS", &_update_rs);
    print_summary("         Scan RS", &_scan_rs);
    print_summary("         Object Copy", &_obj_copy);
    print_summary("         Termination", &_termination);
    print_summary("         Parallel Other", &_parallel_other);
    print_summary("      Clear CT", &_clear_ct);
    print_summary("      Other", &_other);
  }
  tty->cr();

  tty->print_cr("MISC");
  print_summary_sd("   Stop World", &_all_stop_world_times_ms);
  print_summary_sd("   Yields", &_all_yield_times_ms);
}
2194
2195 void TraceOldGenTimeData::record_full_collection(double full_gc_time_ms) {
2196 if (TraceOldGenTime) {
2197 _all_full_gc_times.add(full_gc_time_ms);
2198 }
2199 }
2200
// Print the full-GC timing summary: count, total, average, standard
// deviation and maximum. Nothing is printed when no full GCs occurred.
void TraceOldGenTimeData::print() const {
  // No-op unless the TraceOldGenTime flag was set.
  if (!TraceOldGenTime) {
    return;
  }

  if (_all_full_gc_times.num() > 0) {
    // Sum is recorded in ms; report it in seconds.
    tty->print("\n%4d full_gcs: total time = %8.2f s",
      _all_full_gc_times.num(),
      _all_full_gc_times.sum() / 1000.0);
    tty->print_cr(" (avg = %8.2fms).", _all_full_gc_times.avg());
    tty->print_cr("     [std. dev = %8.2f ms, max = %8.2f ms]",
      _all_full_gc_times.sd(),
      _all_full_gc_times.maximum());
  }
}
|