JDK-8236073 G1: Use SoftMaxHeapSize to guide GC heuristics

--- old/src/hotspot/share/gc/g1/g1Policy.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1Analytics.hpp"
  27 #include "gc/g1/g1Arguments.hpp"
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc/g1/g1CollectionSet.hpp"
  30 #include "gc/g1/g1CollectionSetCandidates.hpp"
  31 #include "gc/g1/g1ConcurrentMark.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1ConcurrentRefine.hpp"
  34 #include "gc/g1/g1CollectionSetChooser.hpp"
  35 #include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
  36 #include "gc/g1/g1HotCardCache.hpp"
  37 #include "gc/g1/g1IHOPControl.hpp"
  38 #include "gc/g1/g1GCPhaseTimes.hpp"
  39 #include "gc/g1/g1Policy.hpp"
  40 #include "gc/g1/g1SurvivorRegions.hpp"
  41 #include "gc/g1/g1YoungGenSizer.hpp"
  42 #include "gc/g1/heapRegion.inline.hpp"
  43 #include "gc/g1/heapRegionRemSet.hpp"
  44 #include "gc/shared/gcPolicyCounters.hpp"
  45 #include "logging/logStream.hpp"
  46 #include "runtime/arguments.hpp"
  47 #include "runtime/java.hpp"
  48 #include "runtime/mutexLocker.hpp"
  49 #include "utilities/debug.hpp"
  50 #include "utilities/growableArray.hpp"
  51 #include "utilities/pair.hpp"
  52 
  53 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  54   _predictor(G1ConfidencePercent / 100.0),


  59   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  60   _full_collection_start_sec(0.0),
  61   _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
  62   _young_list_target_length(0),
  63   _young_list_fixed_length(0),
  64   _young_list_max_length(0),
  65   _eden_surv_rate_group(new G1SurvRateGroup()),
  66   _survivor_surv_rate_group(new G1SurvRateGroup()),
  67   _reserve_factor((double) G1ReservePercent / 100.0),
  68   _reserve_regions(0),
  69   _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  70   _free_regions_at_end_of_collection(0),
  71   _rs_length(0),
  72   _rs_length_prediction(0),
  73   _pending_cards_at_gc_start(0),
  74   _pending_cards_at_prev_gc_end(0),
  75   _total_mutator_refined_cards(0),
  76   _total_concurrent_refined_cards(0),
  77   _total_concurrent_refinement_time(),
  78   _bytes_allocated_in_old_since_last_gc(0),
  79   _initial_mark_to_mixed(),
  80   _collection_set(NULL),
  81   _g1h(NULL),
  82   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  83   _mark_remark_start_sec(0),
  84   _mark_cleanup_start_sec(0),
  85   _tenuring_threshold(MaxTenuringThreshold),
  86   _max_survivor_regions(0),
  87   _survivors_age_table(true)
  88 {
  89 }
  90 
  91 G1Policy::~G1Policy() {
  92   delete _ihop_control;
  93   delete _young_gen_sizer;
  94 }
  95 
  96 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  97   if (G1Arguments::is_heterogeneous_heap()) {
  98     return new G1HeterogeneousHeapPolicy(gc_timer_stw);


1078       initiate_conc_mark();
1079       log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
1080     } else {
1081       // The concurrent marking thread is still finishing up the
1082       // previous cycle. If we start one right now the two cycles
1083       // overlap. In particular, the concurrent marking thread might
1084       // be in the process of clearing the next marking bitmap (which
1085       // we will use for the next cycle if we start one). Starting a
1086       // cycle now will be bad given that parts of the marking
1087       // information might get cleared by the marking thread. And we
1088       // cannot wait for the marking thread to finish the cycle as it
1089       // periodically yields while clearing the next marking bitmap
1090       // and, if it's in a yield point, it's waiting for us to
1091       // finish. So, at this point we will not start a cycle and we'll
1092       // let the concurrent marking thread complete the last one.
1093       log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
1094     }
1095   }
1096 }
1097 
1098 void G1Policy::record_concurrent_mark_cleanup_end() {
1099   G1CollectionSetCandidates* candidates = G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions());
1100   _collection_set->set_candidates(candidates);
1101 
1102   bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
1103   if (!mixed_gc_pending) {
1104     clear_collection_set_candidates();
1105     abort_time_to_mixed_tracking();
1106   }
1107   collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
1108   collector_state()->set_mark_or_rebuild_in_progress(false);
1109 
1110   double end_sec = os::elapsedTime();
1111   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1112   _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
1113   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
1114 
1115   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1116 }
1117 
1118 double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
1119   return percent_of(reclaimable_bytes, _g1h->capacity());
1120 }
1121 
1122 class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
1123   virtual bool do_heap_region(HeapRegion* r) {
1124     r->rem_set()->clear_locked(true /* only_cardset */);
1125     return false;
1126   }
1127 };
1128 
1129 void G1Policy::clear_collection_set_candidates() {


1182       if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1183         _initial_mark_to_mixed.record_initial_mark_end(end);
1184       }
1185       break;
1186     case MixedGC:
1187       _initial_mark_to_mixed.record_mixed_gc_start(start);
1188       break;
1189     default:
1190       ShouldNotReachHere();
1191   }
1192 }
1193 
1194 void G1Policy::abort_time_to_mixed_tracking() {
1195   _initial_mark_to_mixed.reset();
1196 }
1197 
1198 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
1199                                        const char* false_action_str) const {
1200   G1CollectionSetCandidates* candidates = _collection_set->candidates();
1201 
1202   if (candidates->is_empty()) {
1203     log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1204     return false;
1205   }
1206 
1207   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1208   size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1209   double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1210   double threshold = (double) G1HeapWastePercent;
1211   if (reclaimable_percent <= threshold) {
1212     log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1213                         false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1214     return false;
1215   }
1216   log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1217                       true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1218   return true;
1219 }
1220 
1221 uint G1Policy::calc_min_old_cset_length() const {
1222   // The min old CSet region bound is based on the maximum desired
1223   // number of mixed GCs after a cycle. I.e., even if some old regions
1224   // look expensive, we should add them to the CSet anyway to make
1225   // sure we go through the available old regions in no more than the
1226   // maximum desired number of mixed GCs.
1227   //
1228   // The calculation is based on the number of marked regions we added
1229   // to the CSet candidates in the first place, not how many remain, so
1230   // that the result is the same during all mixed GCs that follow a cycle.
1231 
1232   const size_t region_num = _collection_set->candidates()->num_regions();
1233   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1234   size_t result = region_num / gc_num;
1235   // emulate ceiling
1236   if (result * gc_num < region_num) {
1237     result += 1;


1393   HeapRegion* last = NULL;
1394   for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
1395        it != survivors->regions()->end();
1396        ++it) {
1397     HeapRegion* curr = *it;
1398     set_region_survivor(curr);
1399 
1400     // The region is a non-empty survivor so let's add it to
1401     // the incremental collection set for the next evacuation
1402     // pause.
1403     _collection_set->add_survivor_regions(curr);
1404 
1405     last = curr;
1406   }
1407   note_stop_adding_survivor_regions();
1408 
1409   // Don't clear the survivor list handles until the start of
1410   // the next evacuation pause - we need it in order to re-tag
1411   // the survivor regions from this evacuation pause as 'young'
1412   // at the start of the next.
1413 }

+++ new/src/hotspot/share/gc/g1/g1Policy.cpp

  15  * You should have received a copy of the GNU General Public License version
  16  * 2 along with this work; if not, write to the Free Software Foundation,
  17  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  18  *
  19  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  20  * or visit www.oracle.com if you need additional information or have any
  21  * questions.
  22  *
  23  */
  24 
  25 #include "precompiled.hpp"
  26 #include "gc/g1/g1Analytics.hpp"
  27 #include "gc/g1/g1Arguments.hpp"
  28 #include "gc/g1/g1CollectedHeap.inline.hpp"
  29 #include "gc/g1/g1CollectionSet.hpp"
  30 #include "gc/g1/g1CollectionSetCandidates.hpp"
  31 #include "gc/g1/g1ConcurrentMark.hpp"
  32 #include "gc/g1/g1ConcurrentMarkThread.inline.hpp"
  33 #include "gc/g1/g1ConcurrentRefine.hpp"
  34 #include "gc/g1/g1CollectionSetChooser.hpp"
  35 #include "gc/g1/g1HeapSizingPolicy.hpp"
  36 #include "gc/g1/g1HeterogeneousHeapPolicy.hpp"
  37 #include "gc/g1/g1HotCardCache.hpp"
  38 #include "gc/g1/g1IHOPControl.hpp"
  39 #include "gc/g1/g1GCPhaseTimes.hpp"
  40 #include "gc/g1/g1Policy.hpp"
  41 #include "gc/g1/g1SurvivorRegions.hpp"
  42 #include "gc/g1/g1YoungGenSizer.hpp"
  43 #include "gc/g1/heapRegion.inline.hpp"
  44 #include "gc/g1/heapRegionRemSet.hpp"
  45 #include "gc/shared/gcPolicyCounters.hpp"
  46 #include "logging/logStream.hpp"
  47 #include "runtime/arguments.hpp"
  48 #include "runtime/java.hpp"
  49 #include "runtime/mutexLocker.hpp"
  50 #include "utilities/debug.hpp"
  51 #include "utilities/growableArray.hpp"
  52 #include "utilities/pair.hpp"
  53 
  54 G1Policy::G1Policy(STWGCTimer* gc_timer) :
  55   _predictor(G1ConfidencePercent / 100.0),


  60   _policy_counters(new GCPolicyCounters("GarbageFirst", 1, 2)),
  61   _full_collection_start_sec(0.0),
  62   _collection_pause_end_millis(os::javaTimeNanos() / NANOSECS_PER_MILLISEC),
  63   _young_list_target_length(0),
  64   _young_list_fixed_length(0),
  65   _young_list_max_length(0),
  66   _eden_surv_rate_group(new G1SurvRateGroup()),
  67   _survivor_surv_rate_group(new G1SurvRateGroup()),
  68   _reserve_factor((double) G1ReservePercent / 100.0),
  69   _reserve_regions(0),
  70   _young_gen_sizer(G1YoungGenSizer::create_gen_sizer()),
  71   _free_regions_at_end_of_collection(0),
  72   _rs_length(0),
  73   _rs_length_prediction(0),
  74   _pending_cards_at_gc_start(0),
  75   _pending_cards_at_prev_gc_end(0),
  76   _total_mutator_refined_cards(0),
  77   _total_concurrent_refined_cards(0),
  78   _total_concurrent_refinement_time(),
  79   _bytes_allocated_in_old_since_last_gc(0),
  80   _minimum_desired_bytes_after_last_cm(0),
  81   _initial_mark_to_mixed(),
  82   _collection_set(NULL),
  83   _g1h(NULL),
  84   _phase_times(new G1GCPhaseTimes(gc_timer, ParallelGCThreads)),
  85   _mark_remark_start_sec(0),
  86   _mark_cleanup_start_sec(0),
  87   _tenuring_threshold(MaxTenuringThreshold),
  88   _max_survivor_regions(0),
  89   _survivors_age_table(true)
  90 {
  91 }
  92 
  93 G1Policy::~G1Policy() {
  94   delete _ihop_control;
  95   delete _young_gen_sizer;
  96 }
  97 
  98 G1Policy* G1Policy::create_policy(STWGCTimer* gc_timer_stw) {
  99   if (G1Arguments::is_heterogeneous_heap()) {
 100     return new G1HeterogeneousHeapPolicy(gc_timer_stw);


1080       initiate_conc_mark();
1081       log_debug(gc, ergo)("Initiate concurrent cycle (user requested concurrent cycle)");
1082     } else {
1083       // The concurrent marking thread is still finishing up the
1084       // previous cycle. If we start one right now the two cycles
1085       // overlap. In particular, the concurrent marking thread might
1086       // be in the process of clearing the next marking bitmap (which
1087       // we will use for the next cycle if we start one). Starting a
1088       // cycle now will be bad given that parts of the marking
1089       // information might get cleared by the marking thread. And we
1090       // cannot wait for the marking thread to finish the cycle as it
1091       // periodically yields while clearing the next marking bitmap
1092       // and, if it's in a yield point, it's waiting for us to
1093       // finish. So, at this point we will not start a cycle and we'll
1094       // let the concurrent marking thread complete the last one.
1095       log_debug(gc, ergo)("Do not initiate concurrent cycle (concurrent cycle already in progress)");
1096     }
1097   }
1098 }
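// Editorial aside, not part of the source: the comment above describes a
// potential deadlock, which is why the pause neither starts a cycle nor waits:
//
//   VM thread (inside the pause):  would block until the marking thread has
//                                  finished clearing the next bitmap
//   Marking thread:                yields periodically while clearing, and at
//                                  a yield point it blocks until this very
//                                  pause is over
//
// With each side waiting on the other, the only safe option is to skip
// starting a cycle here and let the in-flight one complete.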
1099 
1100 void G1Policy::determine_desired_bytes_after_concurrent_mark() {
1101   size_t cur_used_bytes = _g1h->non_young_capacity_bytes();
1102 
1103   size_t overall_target_capacity = _g1h->heap_sizing_policy()->target_heap_capacity(cur_used_bytes, MinHeapFreeRatio);
1104 
1105   size_t desired_bytes_after_concurrent_mark = _g1h->policy()->desired_bytes_after_concurrent_mark(cur_used_bytes);
1106 
1107   _minimum_desired_bytes_after_last_cm = MIN2(desired_bytes_after_concurrent_mark, overall_target_capacity);
1108 
1110   log_debug(gc, ergo, heap)("Desired capacity after concurrent mark: used: " SIZE_FORMAT " "
1111                             "overall_target_capacity: " SIZE_FORMAT " desired_bytes_after_concurrent_mark: " SIZE_FORMAT " "
1112                             "minimum_desired_bytes_after_concurrent_mark: " SIZE_FORMAT,
1113                             cur_used_bytes, overall_target_capacity, desired_bytes_after_concurrent_mark, _minimum_desired_bytes_after_last_cm);
1113 }
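// Worked example with assumed numbers, to make the MIN2 above concrete: with
// cur_used_bytes = 2048 MB and MinHeapFreeRatio = 40, a target capacity that
// keeps 40% of the heap free works out to roughly 2048 / 0.6, about 3413 MB.
// If the IHOP-based desired_bytes_after_concurrent_mark() estimate is 2800 MB,
// the smaller value, 2800 MB, is recorded in
// _minimum_desired_bytes_after_last_cm. The exact semantics of
// target_heap_capacity() are assumed here, not quoted from the patch.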
1114 
1115 void G1Policy::record_concurrent_mark_cleanup_end() {
1116   G1CollectionSetCandidates* candidates = G1CollectionSetChooser::build(_g1h->workers(), _g1h->num_regions());
1117   _collection_set->set_candidates(candidates);
1118 
1119   bool mixed_gc_pending = next_gc_should_be_mixed("request mixed gcs", "request young-only gcs");
1120   if (!mixed_gc_pending) {
1121     clear_collection_set_candidates();
1122     abort_time_to_mixed_tracking();
1123   }
1124   collector_state()->set_in_young_gc_before_mixed(mixed_gc_pending);
1125   collector_state()->set_mark_or_rebuild_in_progress(false);
1126 
1127   determine_desired_bytes_after_concurrent_mark();
1128 
1129   double end_sec = os::elapsedTime();
1130   double elapsed_time_ms = (end_sec - _mark_cleanup_start_sec) * 1000.0;
1131   _analytics->report_concurrent_mark_cleanup_times_ms(elapsed_time_ms);
1132   _analytics->append_prev_collection_pause_end_ms(elapsed_time_ms);
1133 
1134   record_pause(Cleanup, _mark_cleanup_start_sec, end_sec);
1135 }
1136 
1137 double G1Policy::reclaimable_bytes_percent(size_t reclaimable_bytes) const {
1138   return percent_of(reclaimable_bytes, _g1h->capacity());
1139 }
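// Worked example with assumed numbers: for a 1024 MB committed heap
// (_g1h->capacity()) and 205 MB of reclaimable bytes in the candidate
// regions, percent_of() yields about 20.0, the value that is later compared
// against G1HeapWastePercent.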
1140 
1141 class G1ClearCollectionSetCandidateRemSets : public HeapRegionClosure {
1142   virtual bool do_heap_region(HeapRegion* r) {
1143     r->rem_set()->clear_locked(true /* only_cardset */);
1144     return false;
1145   }
1146 };
1147 
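// Sketch of how such a closure is usually driven (editorial; the body of
// clear_collection_set_candidates() below is elided in this listing, so the
// iterate() call shown here is an assumption, not a quote):
//
//   G1ClearCollectionSetCandidateRemSets cl;
//   candidates->iterate(&cl);  // calls do_heap_region() once per region;
//                              // returning false means "do not stop early"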
1148 void G1Policy::clear_collection_set_candidates() {


1201       if (_g1h->gc_cause() != GCCause::_g1_periodic_collection) {
1202         _initial_mark_to_mixed.record_initial_mark_end(end);
1203       }
1204       break;
1205     case MixedGC:
1206       _initial_mark_to_mixed.record_mixed_gc_start(start);
1207       break;
1208     default:
1209       ShouldNotReachHere();
1210   }
1211 }
1212 
1213 void G1Policy::abort_time_to_mixed_tracking() {
1214   _initial_mark_to_mixed.reset();
1215 }
1216 
1217 bool G1Policy::next_gc_should_be_mixed(const char* true_action_str,
1218                                        const char* false_action_str) const {
1219   G1CollectionSetCandidates* candidates = _collection_set->candidates();
1220 
1221   if (candidates == NULL || candidates->is_empty()) {
1222     if (false_action_str != NULL) {
1223       log_debug(gc, ergo)("%s (candidate old regions not available)", false_action_str);
1224     }
1225     return false;
1226   }
1227 
1228   // Is the amount of uncollected reclaimable space above G1HeapWastePercent?
1229   size_t reclaimable_bytes = candidates->remaining_reclaimable_bytes();
1230   double reclaimable_percent = reclaimable_bytes_percent(reclaimable_bytes);
1231   double threshold = (double) G1HeapWastePercent;
1232   if (reclaimable_percent <= threshold) {
1233     if (false_action_str != NULL) {
1234       log_debug(gc, ergo)("%s (reclaimable percentage not over threshold). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1235                           false_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1236     }
1237     return false;
1238   }
1239   if (true_action_str != NULL) {
1240     log_debug(gc, ergo)("%s (candidate old regions available). candidate old regions: %u reclaimable: " SIZE_FORMAT " (%1.2f) threshold: " UINTX_FORMAT,
1241                         true_action_str, candidates->num_remaining(), reclaimable_bytes, reclaimable_percent, G1HeapWastePercent);
1242   }
1243   return true;
1244 }
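// Concrete reading with the default G1HeapWastePercent of 5: on a 1024 MB
// heap, mixed GCs keep being requested only while the remaining candidates
// hold more than about 51 MB of reclaimable space; anything at or below that
// is accepted as heap waste and the policy falls back to young-only GCs.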
1245 
1246 uint G1Policy::calc_min_old_cset_length() const {
1247   // The min old CSet region bound is based on the maximum desired
1248   // number of mixed GCs after a cycle. I.e., even if some old regions
1249   // look expensive, we should add them to the CSet anyway to make
1250   // sure we go through the available old regions in no more than the
1251   // maximum desired number of mixed GCs.
1252   //
1253   // The calculation is based on the number of marked regions we added
1254   // to the CSet candidates in the first place, not how many remain, so
1255   // that the result is the same during all mixed GCs that follow a cycle.
1256 
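       // Worked example, editorial and not in the source: 23 candidate
       // regions with the default G1MixedGCCountTarget of 8 gives
       // 23 / 8 = 2 with a remainder, and since 2 * 8 = 16 < 23 the ceiling
       // step below bumps the result to a minimum of 3 old regions per
       // mixed GC.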
1257   const size_t region_num = _collection_set->candidates()->num_regions();
1258   const size_t gc_num = (size_t) MAX2(G1MixedGCCountTarget, (uintx) 1);
1259   size_t result = region_num / gc_num;
1260   // emulate ceiling
1261   if (result * gc_num < region_num) {
1262     result += 1;


1418   HeapRegion* last = NULL;
1419   for (GrowableArrayIterator<HeapRegion*> it = survivors->regions()->begin();
1420        it != survivors->regions()->end();
1421        ++it) {
1422     HeapRegion* curr = *it;
1423     set_region_survivor(curr);
1424 
1425     // The region is a non-empty survivor so let's add it to
1426     // the incremental collection set for the next evacuation
1427     // pause.
1428     _collection_set->add_survivor_regions(curr);
1429 
1430     last = curr;
1431   }
1432   note_stop_adding_survivor_regions();
1433 
1434   // Don't clear the survivor list handles until the start of
1435   // the next evacuation pause - we need it in order to re-tag
1436   // the survivor regions from this evacuation pause as 'young'
1437   // at the start of the next.
1438 }
1439 
1440 size_t G1Policy::desired_bytes_after_concurrent_mark(size_t used_bytes) {
1441   size_t minimum_desired_buffer_size = _ihop_control->predict_unrestrained_buffer_size();
1442   if (minimum_desired_buffer_size != 0) {
1443     return minimum_desired_buffer_size;
1444   } else {
1445     return _young_list_max_length * HeapRegion::GrainBytes + _reserve_regions * HeapRegion::GrainBytes + used_bytes;
1446   }
1447 }
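// Reading of the fallback above (editorial note): when
// predict_unrestrained_buffer_size() returns 0, taken here to mean the IHOP
// control has no usable prediction yet (an inference from the guard, not a
// quote), the desired size degrades to room for a full-size young generation
// plus the evacuation reserve on top of current usage:
//
//   (_young_list_max_length + _reserve_regions) * HeapRegion::GrainBytes
//       + used_bytes
//
// With assumed values (40 max young regions, 10 reserve regions, 32 MB
// regions, 2048 MB used) that is 1280 + 320 + 2048 = 3648 MB.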