6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/g1Analytics.hpp"
27 #include "gc/g1/g1CollectedHeap.inline.hpp"
28 #include "gc/g1/g1CollectionSet.hpp"
29 #include "gc/g1/g1ConcurrentMark.hpp"
30 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
31 #include "gc/g1/g1ConcurrentRefine.hpp"
32 #include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
33 #include "gc/g1/g1HotCardCache.hpp"
34 #include "gc/g1/g1IHOPControl.hpp"
35 #include "gc/g1/g1GCPhaseTimes.hpp"
36 #include "gc/g1/g1Policy.hpp"
37 #include "gc/g1/g1SurvivorRegions.hpp"
38 #include "gc/g1/g1YoungGenSizer.hpp"
39 #include "gc/g1/heapRegion.inline.hpp"
40 #include "gc/g1/heapRegionRemSet.hpp"
41 #include "gc/shared/gcPolicyCounters.hpp"
42 #include "logging/logStream.hpp"
43 #include "runtime/arguments.hpp"
44 #include "runtime/java.hpp"
45 #include "runtime/mutexLocker.hpp"
421
422 update_young_list_max_and_target_length(rs_lengths_prediction);
423 }
424 }
425
426 void G1Policy::update_rs_lengths_prediction() {
427 update_rs_lengths_prediction(_analytics->predict_rs_lengths());
428 }
429
430 void G1Policy::update_rs_lengths_prediction(size_t prediction) {
431 if (collector_state()->in_young_only_phase() && adaptive_young_list_length()) {
432 _rs_lengths_prediction = prediction;
433 }
434 }
435
// Record the start of a Full GC: remember the start timestamp, move the
// collector state out of the young-only phase into full-GC mode, and drop the
// mixed-GC candidate regions, which a full compaction invalidates.
void G1Policy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_in_young_only_phase(false);
  collector_state()->set_in_full_gc(true);
  // Old-region candidates selected before the Full GC are stale now.
  cset_chooser()->clear();
}
443
444 void G1Policy::record_full_collection_end() {
445 // Consider this like a collection pause for the purposes of allocation
446 // since last pause.
447 double end_sec = os::elapsedTime();
448 double full_gc_time_sec = end_sec - _full_collection_start_sec;
449 double full_gc_time_ms = full_gc_time_sec * 1000.0;
450
451 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
452
453 collector_state()->set_in_full_gc(false);
454
455 // "Nuke" the heuristics that control the young/mixed GC
456 // transitions and make sure we start with young GCs after the Full GC.
457 collector_state()->set_in_young_only_phase(true);
458 collector_state()->set_in_young_gc_before_mixed(false);
459 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
460 collector_state()->set_in_initial_mark_gc(false);
461 collector_state()->set_mark_or_rebuild_in_progress(false);
529 }
530
531 double G1Policy::young_other_time_ms() const {
532 return phase_times()->young_cset_choice_time_ms() +
533 phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet);
534 }
535
536 double G1Policy::non_young_other_time_ms() const {
537 return phase_times()->non_young_cset_choice_time_ms() +
538 phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
539 }
540
541 double G1Policy::other_time_ms(double pause_time_ms) const {
542 return pause_time_ms - phase_times()->cur_collection_par_time_ms();
543 }
544
545 double G1Policy::constant_other_time_ms(double pause_time_ms) const {
546 return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms();
547 }
548
549 CollectionSetChooser* G1Policy::cset_chooser() const {
550 return _collection_set->cset_chooser();
551 }
552
553 bool G1Policy::about_to_start_mixed_phase() const {
554 return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
555 }
556
// Decide whether a concurrent marking cycle should be requested.
// source          - tag naming the caller, used in the ergonomics log line.
// alloc_word_size - size in words of a pending allocation to account for on
//                   top of the current occupancy (0 if none).
// Returns true only when the IHOP occupancy threshold is exceeded while we
// are in the young-only phase and not already heading into a mixed phase.
bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    // Marking is running, or mixed GCs are about to start; do not stack
    // another marking cycle on top of that.
    return false;
  }

  // Threshold (in bytes of old-generation occupancy) supplied by the IHOP
  // control, which may be static or adaptive.
  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    // Only actually request the cycle while young-only and not just before
    // the mixed phase; otherwise just log that the threshold was crossed.
    result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
  }

  return result;
}
578
579 // Anything below that is considered to be zero
580 #define MIN_TIMER_GRANULARITY 0.0000001
581
582 void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
583 double end_time_sec = os::elapsedTime();
584
585 size_t cur_used_bytes = _g1h->used();
586 assert(cur_used_bytes == _g1h->recalculate_used(), "It should!");
587 bool this_pause_included_initial_mark = false;
588 bool this_pause_was_young_only = collector_state()->in_young_only_phase();
589
590 bool update_stats = !_g1h->evacuation_failed();
591
592 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
593
594 _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
595
596 this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
597 if (this_pause_included_initial_mark) {
598 record_concurrent_mark_init_end(0.0);
599 } else {
600 maybe_start_marking();
601 }
602
603 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
604 if (app_time_ms < MIN_TIMER_GRANULARITY) {
605 // This usually happens due to the timer not having the required
606 // granularity. Some Linuxes are the usual culprits.
607 // We'll just set it to something (arbitrarily) small.
608 app_time_ms = 1.0;
756 // This skews the predicted marking length towards smaller values which might cause
757 // the mark start being too late.
758 _initial_mark_to_mixed.reset();
759 }
760
761 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
762 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
763
764 if (update_rs_time_goal_ms < scan_hcc_time_ms) {
765 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
766 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
767 update_rs_time_goal_ms, scan_hcc_time_ms);
768
769 update_rs_time_goal_ms = 0;
770 } else {
771 update_rs_time_goal_ms -= scan_hcc_time_ms;
772 }
773 _g1h->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS),
774 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
775 update_rs_time_goal_ms);
776
777 cset_chooser()->verify();
778 }
779
780 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
781 if (G1UseAdaptiveIHOP) {
782 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
783 predictor,
784 G1ReservePercent,
785 G1HeapWastePercent);
786 } else {
787 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
788 }
789 }
790
791 void G1Policy::update_ihop_prediction(double mutator_time_s,
792 size_t mutator_alloc_bytes,
793 size_t young_gen_size,
794 bool this_gc_was_young_only) {
795 // Always try to update IHOP prediction. Even evacuation failures give information
796 // about e.g. whether to start IHOP earlier next time.
797
1015 log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
1016 } else {
1017 // The concurrent marking thread is still finishing up the
1018 // previous cycle. If we start one right now the two cycles
1019 // overlap. In particular, the concurrent marking thread might
1020 // be in the process of clearing the next marking bitmap (which
1021 // we will use for the next cycle if we start one). Starting a
1022 // cycle now will be bad given that parts of the marking
1023 // information might get cleared by the marking thread. And we
1024 // cannot wait for the marking thread to finish the cycle as it
1025 // periodically yields while clearing the next marking bitmap
1026 // and, if it's in a yield point, it's waiting for us to
1027 // finish. So, at this point we will not start a cycle and we'll
1028 // let the concurrent marking thread complete the last one.
1029 log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
1030 }
1031 }
1032 }
1033
// Finish the concurrent-mark cleanup phase: rebuild the mixed-GC candidate
// list from the marking results, decide whether mixed GCs should follow, and
// record timing for this cleanup pause.
void G1Policy::record_concurrent_mark_cleanup_end() {
  cset_chooser()->rebuild(_g1h->workers(), _g1h->num_regions());

  bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
  if (!mixed_gc_pending) {
    // Not enough reclaimable space to justify mixed GCs: discard the
    // candidates and stop tracking the initial-mark-to-mixed interval.
    clear_collection_set_candidates();
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
  collector_state()->set_mark_or_rebuild_in_progress(false);

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}
1052
1053 double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
1054 return percent_of(reclaimable_bytes, _g1h->capacity());
1055 }
1056
// Region closure that clears the card-set part of each visited region's
// remembered set; used when discarding leftover collection set candidates.
class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
  virtual bool do_heap_region(HeapRegion* r) {
    r->rem_set()->clear_locked(true /* only_cardset */);
    // Returning false keeps the iteration going over all regions.
    return false;
  }
};
1063
1064 void G1Policy::clear_collection_set_candidates() {
1065 // Clear remembered sets of remaining candidate regions and the actual candidate
1066 // list.
1067 G1ClearCollectionSetCandidateRemSets cl;
1068 cset_chooser()->iterate(&cl);
1069 cset_chooser()->clear();
1070 }
1071
1072 void G1Policy::maybe_start_marking() {
1073 if (need_to_start_conc_mark("end of GC")) {
1074 // Note: this might have already been set, if during the last
1075 // pause we decided to start a cycle but at the beginning of
1076 // this pause we decided to postpone it. That's OK.
1077 collector_state()->set_initiate_conc_mark_if_possible(true);
1078 }
1079 }
1080
1081 G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
1082 assert(!collector_state()->in_full_gc(), "must be");
1083 if (collector_state()->in_initial_mark_gc()) {
1084 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1085 return InitialMarkGC;
1086 } else if (collector_state()->in_young_gc_before_mixed()) {
1087 assert(!collector_state()->in_initial_mark_gc(), "must be");
1088 return LastYoungGC;
1089 } else if (collector_state()->in_mixed_phase()) {
1115 break;
1116 case InitialMarkGC:
1117 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1118 _initial_mark_to_mixed.record_initial_mark_end(end);
1119 }
1120 break;
1121 case MixedGC:
1122 _initial_mark_to_mixed.record_mixed_gc_start(start);
1123 break;
1124 default:
1125 ShouldNotReachHere();
1126 }
1127 }
1128
// Abandon tracking of the initial-mark-to-first-mixed-GC interval, e.g. when
// the cycle will not reach a mixed phase.
void G1Policy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}
1132
// Decide whether the next GC should be a mixed one, based on whether old
// region candidates exist and whether the space reclaimable from them exceeds
// the G1HeapWastePercent threshold. The two strings are the messages logged
// for the positive and negative decision, respectively.
bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
                                       const char* false_action_str) const {
  if (cset_chooser()->is_empty()) {
    // Marking produced no candidate old regions at all.
    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = cset_chooser()->remaining_reclaimable_bytes();
  double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_percent <= threshold) {
    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                        false_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
    return false;
  }
  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                      true_action_str, cset_chooser()->remaining_regions(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
  return true;
}
1153
1154 uint G1Policy::calc_min_old_cset_length() const {
1155 // The min old CSet region bound is based on the maximum desired
1156 // number of mixed GCs after a cycle. I.e., even if some old regions
1157 // look expensive, we should add them to the CSet anyway to make
1158 // sure we go through the available old regions in no more than the
1159 // maximum desired number of mixed GCs.
1160 //
1161 // The calculation is based on the number of marked regions we added
1162 // to the CSet chooser in the first place, not how many remain, so
1163 // that the result is the same during all mixed GCs that follow a cycle.
1164
1165 const size_t region_num = (size_t) cset_chooser()->length();
1166 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1167 size_t result = region_num / gc_num;
1168 // emulate ceiling
1169 if (result * gc_num < region_num) {
1170 result += 1;
1171 }
1172 return (uint) result;
1173 }
1174
1175 uint G1Policy::calc_max_old_cset_length() const {
1176 // The max old CSet region bound is based on the threshold expressed
1177 // as a percentage of the heap size. I.e., it should bound the
1178 // number of old regions added to the CSet irrespective of how many
1179 // of them are available.
1180
1181 const G1CollectedHeap* g1h = G1CollectedHeap::heap();
1182 const size_t region_num = g1h->num_regions();
1183 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
1184 size_t result = region_num * perc / 100;
1185 // emulate ceiling
|
6 * under the terms of the GNU General Public License version 2 only, as
7 * published by the Free Software Foundation.
8 *
9 * This code is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
12 * version 2 for more details (a copy is included in the LICENSE file that
13 * accompanied this code).
14 *
15 * You should have received a copy of the GNU General Public License version
16 * 2 along with this work; if not, write to the Free Software Foundation,
17 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
20 * or visit www.oracle.com if you need additional information or have any
21 * questions.
22 *
23 */
24
25 #include "precompiled.hpp"
26 #include "gc/g1/collectionSetChooser.hpp"
27 #include "gc/g1/g1Analytics.hpp"
28 #include "gc/g1/g1CollectedHeap.inline.hpp"
29 #include "gc/g1/g1CollectionSet.hpp"
30 #include "gc/g1/g1ConcurrentMark.hpp"
31 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
32 #include "gc/g1/g1ConcurrentRefine.hpp"
33 #include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
34 #include "gc/g1/g1HotCardCache.hpp"
35 #include "gc/g1/g1IHOPControl.hpp"
36 #include "gc/g1/g1GCPhaseTimes.hpp"
37 #include "gc/g1/g1Policy.hpp"
38 #include "gc/g1/g1SurvivorRegions.hpp"
39 #include "gc/g1/g1YoungGenSizer.hpp"
40 #include "gc/g1/heapRegion.inline.hpp"
41 #include "gc/g1/heapRegionRemSet.hpp"
42 #include "gc/shared/gcPolicyCounters.hpp"
43 #include "logging/logStream.hpp"
44 #include "runtime/arguments.hpp"
45 #include "runtime/java.hpp"
46 #include "runtime/mutexLocker.hpp"
422
423 update_young_list_max_and_target_length(rs_lengths_prediction);
424 }
425 }
426
427 void G1Policy::update_rs_lengths_prediction() {
428 update_rs_lengths_prediction(_analytics->predict_rs_lengths());
429 }
430
431 void G1Policy::update_rs_lengths_prediction(size_t prediction) {
432 if (collector_state()->in_young_only_phase() && adaptive_young_list_length()) {
433 _rs_lengths_prediction = prediction;
434 }
435 }
436
// Record the start of a Full GC: remember the start timestamp, move the
// collector state out of the young-only phase into full-GC mode, and drop the
// mixed-GC candidate regions, which a full compaction invalidates.
void G1Policy::record_full_collection_start() {
  _full_collection_start_sec = os::elapsedTime();
  // Release the future to-space so that it is available for compaction into.
  collector_state()->set_in_young_only_phase(false);
  collector_state()->set_in_full_gc(true);
  // Old-region candidates selected before the Full GC are stale now.
  _collection_set->clear_candidates();
}
444
445 void G1Policy::record_full_collection_end() {
446 // Consider this like a collection pause for the purposes of allocation
447 // since last pause.
448 double end_sec = os::elapsedTime();
449 double full_gc_time_sec = end_sec - _full_collection_start_sec;
450 double full_gc_time_ms = full_gc_time_sec * 1000.0;
451
452 _analytics->update_recent_gc_times(end_sec, full_gc_time_ms);
453
454 collector_state()->set_in_full_gc(false);
455
456 // "Nuke" the heuristics that control the young/mixed GC
457 // transitions and make sure we start with young GCs after the Full GC.
458 collector_state()->set_in_young_only_phase(true);
459 collector_state()->set_in_young_gc_before_mixed(false);
460 collector_state()->set_initiate_conc_mark_if_possible(need_to_start_conc_mark("end of Full GC", 0));
461 collector_state()->set_in_initial_mark_gc(false);
462 collector_state()->set_mark_or_rebuild_in_progress(false);
530 }
531
532 double G1Policy::young_other_time_ms() const {
533 return phase_times()->young_cset_choice_time_ms() +
534 phase_times()->average_time_ms(G1GCPhaseTimes::YoungFreeCSet);
535 }
536
537 double G1Policy::non_young_other_time_ms() const {
538 return phase_times()->non_young_cset_choice_time_ms() +
539 phase_times()->average_time_ms(G1GCPhaseTimes::NonYoungFreeCSet);
540 }
541
542 double G1Policy::other_time_ms(double pause_time_ms) const {
543 return pause_time_ms - phase_times()->cur_collection_par_time_ms();
544 }
545
546 double G1Policy::constant_other_time_ms(double pause_time_ms) const {
547 return other_time_ms(pause_time_ms) - phase_times()->total_free_cset_time_ms();
548 }
549
550 bool G1Policy::about_to_start_mixed_phase() const {
551 return _g1h->concurrent_mark()->cm_thread()->during_cycle() || collector_state()->in_young_gc_before_mixed();
552 }
553
// Decide whether a concurrent marking cycle should be requested.
// source          - tag naming the caller, used in the ergonomics log line.
// alloc_word_size - size in words of a pending allocation to account for on
//                   top of the current occupancy (0 if none).
// Returns true only when the IHOP occupancy threshold is exceeded while we
// are in the young-only phase and not already heading into a mixed phase.
bool G1Policy::need_to_start_conc_mark(const char* source, size_t alloc_word_size) {
  if (about_to_start_mixed_phase()) {
    // Marking is running, or mixed GCs are about to start; do not stack
    // another marking cycle on top of that.
    return false;
  }

  // Threshold (in bytes of old-generation occupancy) supplied by the IHOP
  // control, which may be static or adaptive.
  size_t marking_initiating_used_threshold = _ihop_control->get_conc_mark_start_threshold();

  size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
  size_t alloc_byte_size = alloc_word_size * HeapWordSize;
  size_t marking_request_bytes = cur_used_bytes + alloc_byte_size;

  bool result = false;
  if (marking_request_bytes > marking_initiating_used_threshold) {
    // Only actually request the cycle while young-only and not just before
    // the mixed phase; otherwise just log that the threshold was crossed.
    result = collector_state()->in_young_only_phase() && !collector_state()->in_young_gc_before_mixed();
    log_debug(gc, ergo, ihop)("%s occupancy: " SIZE_FORMAT "B allocation request: " SIZE_FORMAT "B threshold: " SIZE_FORMAT "B (%1.2f) source: %s",
                              result ? "Request concurrent cycle initiation (occupancy higher than threshold)" : "Do not request concurrent cycle initiation (still doing mixed collections)",
                              cur_used_bytes, alloc_byte_size, marking_initiating_used_threshold, (double) marking_initiating_used_threshold / _g1h->capacity() * 100, source);
  }

  return result;
}
575
576 // Anything below that is considered to be zero
577 #define MIN_TIMER_GRANULARITY 0.0000001
578
579 void G1Policy::record_collection_pause_end(double pause_time_ms, size_t cards_scanned, size_t heap_used_bytes_before_gc) {
580 double end_time_sec = os::elapsedTime();
581
582 size_t cur_used_bytes = _g1h->used();
583 assert(cur_used_bytes == _g1h->recalculate_used(), "It should!");
584 bool this_pause_included_initial_mark = false;
585 bool this_pause_was_young_only = collector_state()->in_young_only_phase();
586 bool this_pause_was_last_before_mixed = collector_state()->in_young_gc_before_mixed();
587
588 bool update_stats = !_g1h->evacuation_failed();
589
590 record_pause(young_gc_pause_kind(), end_time_sec - pause_time_ms / 1000.0, end_time_sec);
591
592 _collection_pause_end_millis = os::javaTimeNanos() / NANOSECS_PER_MILLISEC;
593
594 this_pause_included_initial_mark = collector_state()->in_initial_mark_gc();
595 if (this_pause_included_initial_mark) {
596 record_concurrent_mark_init_end(0.0);
597 } else {
598 maybe_start_marking();
599 }
600
601 double app_time_ms = (phase_times()->cur_collection_start_sec() * 1000.0 - _analytics->prev_collection_pause_end_ms());
602 if (app_time_ms < MIN_TIMER_GRANULARITY) {
603 // This usually happens due to the timer not having the required
604 // granularity. Some Linuxes are the usual culprits.
605 // We'll just set it to something (arbitrarily) small.
606 app_time_ms = 1.0;
754 // This skews the predicted marking length towards smaller values which might cause
755 // the mark start being too late.
756 _initial_mark_to_mixed.reset();
757 }
758
759 // Note that _mmu_tracker->max_gc_time() returns the time in seconds.
760 double update_rs_time_goal_ms = _mmu_tracker->max_gc_time() * MILLIUNITS * G1RSetUpdatingPauseTimePercent / 100.0;
761
762 if (update_rs_time_goal_ms < scan_hcc_time_ms) {
763 log_debug(gc, ergo, refine)("Adjust concurrent refinement thresholds (scanning the HCC expected to take longer than Update RS time goal)."
764 "Update RS time goal: %1.2fms Scan HCC time: %1.2fms",
765 update_rs_time_goal_ms, scan_hcc_time_ms);
766
767 update_rs_time_goal_ms = 0;
768 } else {
769 update_rs_time_goal_ms -= scan_hcc_time_ms;
770 }
771 _g1h->concurrent_refine()->adjust(average_time_ms(G1GCPhaseTimes::UpdateRS),
772 phase_times()->sum_thread_work_items(G1GCPhaseTimes::UpdateRS),
773 update_rs_time_goal_ms);
774 }
775
776 G1IHOPControl* G1Policy::create_ihop_control(const G1Predictions* predictor){
777 if (G1UseAdaptiveIHOP) {
778 return new G1AdaptiveIHOPControl(InitiatingHeapOccupancyPercent,
779 predictor,
780 G1ReservePercent,
781 G1HeapWastePercent);
782 } else {
783 return new G1StaticIHOPControl(InitiatingHeapOccupancyPercent);
784 }
785 }
786
787 void G1Policy::update_ihop_prediction(double mutator_time_s,
788 size_t mutator_alloc_bytes,
789 size_t young_gen_size,
790 bool this_gc_was_young_only) {
791 // Always try to update IHOP prediction. Even evacuation failures give information
792 // about e.g. whether to start IHOP earlier next time.
793
1011 log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
1012 } else {
1013 // The concurrent marking thread is still finishing up the
1014 // previous cycle. If we start one right now the two cycles
1015 // overlap. In particular, the concurrent marking thread might
1016 // be in the process of clearing the next marking bitmap (which
1017 // we will use for the next cycle if we start one). Starting a
1018 // cycle now will be bad given that parts of the marking
1019 // information might get cleared by the marking thread. And we
1020 // cannot wait for the marking thread to finish the cycle as it
1021 // periodically yields while clearing the next marking bitmap
1022 // and, if it's in a yield point, it's waiting for us to
1023 // finish. So, at this point we will not start a cycle and we'll
1024 // let the concurrent marking thread complete the last one.
1025 log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
1026 }
1027 }
1028 }
1029
// Finish the concurrent-mark cleanup phase: build the mixed-GC candidate set
// from the marking results and install it on the collection set, decide
// whether mixed GCs should follow, and record timing for this cleanup pause.
void G1Policy::record_concurrent_mark_cleanup_end() {
  G1CollectionSetCandidates* candidates = CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions());
  _collection_set->set_candidates(candidates);

  bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
  if (!mixed_gc_pending) {
    // Not enough reclaimable space to justify mixed GCs: discard the
    // candidates and stop tracking the initial-mark-to-mixed interval.
    clear_collection_set_candidates();
    abort_time_to_mixed_tracking();
  }
  collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
  collector_state()->set_mark_or_rebuild_in_progress(false);

  double end_sec = os::elapsedTime();
  double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
  _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
  _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);

  record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
}
1049
1050 double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
1051 return percent_of(reclaimable_bytes, _g1h->capacity());
1052 }
1053
// Region closure that clears the card-set part of each visited region's
// remembered set; used when discarding leftover collection set candidates.
class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
  virtual bool do_heap_region(HeapRegion* r) {
    r->rem_set()->clear_locked(true /* only_cardset */);
    // Returning false keeps the iteration going over all regions.
    return false;
  }
};
1060
1061 void G1Policy::clear_collection_set_candidates() {
1062 // Clear remembered sets of remaining candidate regions and the actual candidate
1063 // set.
1064 G1ClearCollectionSetCandidateRemSets cl;
1065 _collection_set->candidates()->iterate(&cl);
1066 _collection_set->clear_candidates();
1067 }
1068
1069 void G1Policy::maybe_start_marking() {
1070 if (need_to_start_conc_mark("end of GC")) {
1071 // Note: this might have already been set, if during the last
1072 // pause we decided to start a cycle but at the beginning of
1073 // this pause we decided to postpone it. That's OK.
1074 collector_state()->set_initiate_conc_mark_if_possible(true);
1075 }
1076 }
1077
1078 G1Policy::PauseKind G1Policy::young_gc_pause_kind() const {
1079 assert(!collector_state()->in_full_gc(), "must be");
1080 if (collector_state()->in_initial_mark_gc()) {
1081 assert(!collector_state()->in_young_gc_before_mixed(), "must be");
1082 return InitialMarkGC;
1083 } else if (collector_state()->in_young_gc_before_mixed()) {
1084 assert(!collector_state()->in_initial_mark_gc(), "must be");
1085 return LastYoungGC;
1086 } else if (collector_state()->in_mixed_phase()) {
1112 break;
1113 case InitialMarkGC:
1114 if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1115 _initial_mark_to_mixed.record_initial_mark_end(end);
1116 }
1117 break;
1118 case MixedGC:
1119 _initial_mark_to_mixed.record_mixed_gc_start(start);
1120 break;
1121 default:
1122 ShouldNotReachHere();
1123 }
1124 }
1125
// Abandon tracking of the initial-mark-to-first-mixed-GC interval, e.g. when
// the cycle will not reach a mixed phase.
void G1Policy::abort_time_to_mixed_tracking() {
  _initial_mark_to_mixed.reset();
}
1129
// Decide whether the next GC should be a mixed one, based on whether old
// region candidates exist and whether the space reclaimable from them exceeds
// the G1HeapWastePercent threshold. The two strings are the messages logged
// for the positive and negative decision, respectively.
bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
                                       const char* false_action_str) const {
  G1CollectionSetCandidates* candidates = _collection_set->candidates();

  if (candidates->is_empty()) {
    // Marking produced no candidate old regions at all.
    log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
    return false;
  }

  // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
  size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
  double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
  double threshold = (double) G1HeapWastePercent;
  if (reclaimable_percent <= threshold) {
    log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                        false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
    return false;
  }
  log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
                      true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
  return true;
}
1152
1153 uint G1Policy::calc_min_old_cset_length() const {
1154 // The min old CSet region bound is based on the maximum desired
1155 // number of mixed GCs after a cycle. I.e., even if some old regions
1156 // look expensive, we should add them to the CSet anyway to make
1157 // sure we go through the available old regions in no more than the
1158 // maximum desired number of mixed GCs.
1159 //
1160 // The calculation is based on the number of marked regions we added
1161 // to the CSet candidates in the first place, not how many remain, so
1162 // that the result is the same during all mixed GCs that follow a cycle.
1163
1164 const size_t region_num = _collection_set->candidates()->num_regions();
1165 const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1166 size_t result = region_num / gc_num;
1167 // emulate ceiling
1168 if (result * gc_num < region_num) {
1169 result += 1;
1170 }
1171 return (uint) result;
1172 }
1173
1174 uint G1Policy::calc_max_old_cset_length() const {
1175 // The max old CSet region bound is based on the threshold expressed
1176 // as a percentage of the heap size. I.e., it should bound the
1177 // number of old regions added to the CSet irrespective of how many
1178 // of them are available.
1179
1180 const G1CollectedHeap* g1h = G1CollectedHeap::heap();
1181 const size_t region_num = g1h->num_regions();
1182 const size_t perc = (size_t) G1OldCSetRegionThresholdPercent;
1183 size_t result = region_num * perc / 100;
1184 // emulate ceiling
|