src/hotspot/share/gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.cpp

rev 55538 : 8226757: Shenandoah: Make Traversal a separate mode


  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
  27 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  29 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  30 #include "logging/log.hpp"
  31 #include "logging/logTag.hpp"
  32 #include "utilities/quickSort.hpp"
  33 
  34 ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics() :
  35   ShenandoahHeuristics(),
  36   _cycle_gap_history(new TruncatedSeq(5)),
  37   _conc_mark_duration_history(new TruncatedSeq(5)),
  38   _conc_uprefs_duration_history(new TruncatedSeq(5)) {
  39 
  40   SHENANDOAH_ERGO_ENABLE_FLAG(ExplicitGCInvokesConcurrent);
  41   SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahImplicitGCInvokesConcurrent);
  42 
  43   // Final configuration checks
  44   SHENANDOAH_CHECK_FLAG_SET(ShenandoahLoadRefBarrier);
  45   SHENANDOAH_CHECK_FLAG_SET(ShenandoahSATBBarrier);
  46   SHENANDOAH_CHECK_FLAG_SET(ShenandoahKeepAliveBarrier);
  47   SHENANDOAH_CHECK_FLAG_SET(ShenandoahCASBarrier);
  48   SHENANDOAH_CHECK_FLAG_SET(ShenandoahCloneBarrier);
  49 }
  50 
  51 ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {}
  52 
  53 void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
  54                                                                          RegionData* data, size_t size,
  55                                                                          size_t actual_free) {
  56   size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
  57 
  58   // The logic for cset selection in adaptive is as follows:
  59   //
  60   //   1. We cannot get a cset larger than the available free space. Otherwise we guarantee OOME
  61   //      during evacuation, and thus guarantee a full GC. In practice, we also want to let the
  62   //      application allocate something. This is why we limit the CSet to some fraction of the
  63   //      available space. In a non-overloaded heap, max_cset would contain all plausible candidates
  64   //      over the garbage threshold.
  65   //
  66   //   2. We should not make the cset so small that the free threshold would not be met right
  67   //      after the cycle. Otherwise we get back-to-back cycles for no reason if the heap is
  68   //      too fragmented. In a non-overloaded, non-fragmented heap, min_garbage would be around zero.
  69   //


 104       cur_cset = new_cset;
 105       cur_garbage = new_garbage;
 106     }
 107   }
 108 }
 109 
 110 void ShenandoahAdaptiveHeuristics::record_cycle_start() {
 111   ShenandoahHeuristics::record_cycle_start();
 112   double last_cycle_gap = (_cycle_start - _last_cycle_end);
 113   _cycle_gap_history->add(last_cycle_gap);
 114 }
 115 
 116 void ShenandoahAdaptiveHeuristics::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
 117   if (phase == ShenandoahPhaseTimings::conc_mark) {
 118     _conc_mark_duration_history->add(secs);
 119   } else if (phase == ShenandoahPhaseTimings::conc_update_refs) {
 120     _conc_uprefs_duration_history->add(secs);
 121   } // Else ignore
 122 }
 123 
 124 bool ShenandoahAdaptiveHeuristics::should_start_normal_gc() const {
 125   ShenandoahHeap* heap = ShenandoahHeap::heap();
 126   size_t capacity = heap->max_capacity();
 127   size_t available = heap->free_set()->available();
 128 
 129   // Check if we are falling below the worst limit; if so, trigger the GC regardless of
 130   // anything else.
 131   size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold;
 132   if (available < min_threshold) {
 133     log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
 134                  available / M, min_threshold / M);
 135     return true;
 136   }
 137 
 138   // Check if we need to learn a bit about the application
 139   const size_t max_learn = ShenandoahLearningSteps;
 140   if (_gc_times_learned < max_learn) {
 141     size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold;
 142     if (available < init_threshold) {
 143       log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
 144                    _gc_times_learned + 1, max_learn, available / M, init_threshold / M);


 155   size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor;
 156   size_t penalties      = capacity / 100 * _gc_time_penalties;
 157 
 158   allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
 159   allocation_headroom -= MIN2(allocation_headroom, penalties);
 160 
 161   // TODO: Allocation rate is way too averaged to be useful during state changes
 162 
 163   double average_gc = _gc_time_history->avg();
 164   double time_since_last = time_since_last_gc();
 165   double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last;
 166 
 167   if (average_gc > allocation_headroom / allocation_rate) {
 168     log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.2f MB/s) to deplete free headroom (" SIZE_FORMAT "M)",
 169                  average_gc * 1000, allocation_rate / M, allocation_headroom / M);
 170     log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "M (free) - " SIZE_FORMAT "M (spike) - " SIZE_FORMAT "M (penalties) = " SIZE_FORMAT "M",
 171                        available / M, spike_headroom / M, penalties / M, allocation_headroom / M);
 172     return true;
 173   }
 174 
 175   return ShenandoahHeuristics::should_start_normal_gc();
 176 }
 177 
 178 bool ShenandoahAdaptiveHeuristics::should_start_update_refs() {
 179   if (! _update_refs_adaptive) {
 180     return _update_refs_early;
 181   }
 182 
 183   double cycle_gap_avg = _cycle_gap_history->avg();
 184   double conc_mark_avg = _conc_mark_duration_history->avg();
 185   double conc_uprefs_avg = _conc_uprefs_duration_history->avg();
 186 
 187   if (_update_refs_early) {
 188     double threshold = ShenandoahMergeUpdateRefsMinGap / 100.0;
 189     if (conc_mark_avg + conc_uprefs_avg > cycle_gap_avg * threshold) {
 190       _update_refs_early = false;
 191     }
 192   } else {
 193     double threshold = ShenandoahMergeUpdateRefsMaxGap / 100.0;
 194     if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) {
 195       _update_refs_early = true;


  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
  27 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  29 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  30 #include "logging/log.hpp"
  31 #include "logging/logTag.hpp"
  32 #include "utilities/quickSort.hpp"
  33 
  34 ShenandoahAdaptiveHeuristics::ShenandoahAdaptiveHeuristics() :
  35   ShenandoahHeuristics(),
  36   _cycle_gap_history(new TruncatedSeq(5)),
  37   _conc_mark_duration_history(new TruncatedSeq(5)),
  38   _conc_uprefs_duration_history(new TruncatedSeq(5)) {}
  39 
  40 ShenandoahAdaptiveHeuristics::~ShenandoahAdaptiveHeuristics() {}
  41 
  42 void ShenandoahAdaptiveHeuristics::choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
  43                                                                          RegionData* data, size_t size,
  44                                                                          size_t actual_free) {
  45   size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
  46 
  47   // The logic for cset selection in adaptive is as follows:
  48   //
  49   //   1. We cannot get a cset larger than the available free space. Otherwise we guarantee OOME
  50   //      during evacuation, and thus guarantee a full GC. In practice, we also want to let the
  51   //      application allocate something. This is why we limit the CSet to some fraction of the
  52   //      available space. In a non-overloaded heap, max_cset would contain all plausible candidates
  53   //      over the garbage threshold.
  54   //
  55   //   2. We should not make the cset so small that the free threshold would not be met right
  56   //      after the cycle. Otherwise we get back-to-back cycles for no reason if the heap is
  57   //      too fragmented. In a non-overloaded, non-fragmented heap, min_garbage would be around zero.
  58   //


  93       cur_cset = new_cset;
  94       cur_garbage = new_garbage;
  95     }
  96   }
  97 }
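
The two constraints in the comment above drive the selection loop whose tail is visible in this hunk (the cur_cset / cur_garbage updates). As a rough standalone sketch of that shape, assuming hypothetical max_cset and min_garbage inputs in place of whatever the heuristic actually derives from actual_free and the Shenandoah flags:

#include <cstddef>

// Illustration only: a simplified greedy cset selection honoring both bounds.
// Region, max_cset and min_garbage are hypothetical stand-ins, not the real types.
struct Region {
  size_t live;     // live bytes that would have to be evacuated
  size_t garbage;  // garbage bytes that would be reclaimed
};

static size_t select_cset(const Region* regions, size_t count, size_t garbage_threshold,
                          size_t max_cset, size_t min_garbage) {
  size_t cur_cset = 0;
  size_t cur_garbage = 0;
  size_t taken = 0;
  for (size_t i = 0; i < count; i++) {
    size_t new_cset    = cur_cset + regions[i].live;
    size_t new_garbage = cur_garbage + regions[i].garbage;
    // Bound 1: never grow the cset past the evacuation budget (max_cset).
    if (new_cset > max_cset) {
      break;
    }
    // Bound 2: keep taking regions while below the minimum garbage target,
    // or while a region individually clears the per-region garbage threshold.
    if (new_garbage < min_garbage || regions[i].garbage > garbage_threshold) {
      cur_cset = new_cset;
      cur_garbage = new_garbage;
      taken++;
    }
  }
  return taken;
}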
  98 
  99 void ShenandoahAdaptiveHeuristics::record_cycle_start() {
 100   ShenandoahHeuristics::record_cycle_start();
 101   double last_cycle_gap = (_cycle_start - _last_cycle_end);
 102   _cycle_gap_history->add(last_cycle_gap);
 103 }
 104 
 105 void ShenandoahAdaptiveHeuristics::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
 106   if (phase == ShenandoahPhaseTimings::conc_mark) {
 107     _conc_mark_duration_history->add(secs);
 108   } else if (phase == ShenandoahPhaseTimings::conc_update_refs) {
 109     _conc_uprefs_duration_history->add(secs);
 110   } // Else ignore
 111 }
 112 
 113 bool ShenandoahAdaptiveHeuristics::should_start_gc() const {
 114   ShenandoahHeap* heap = ShenandoahHeap::heap();
 115   size_t capacity = heap->max_capacity();
 116   size_t available = heap->free_set()->available();
 117 
 118   // Check if we are falling below the worst limit; if so, trigger the GC regardless of
 119   // anything else.
 120   size_t min_threshold = capacity / 100 * ShenandoahMinFreeThreshold;
 121   if (available < min_threshold) {
 122     log_info(gc)("Trigger: Free (" SIZE_FORMAT "M) is below minimum threshold (" SIZE_FORMAT "M)",
 123                  available / M, min_threshold / M);
 124     return true;
 125   }
 126 
 127   // Check if we need to learn a bit about the application
 128   const size_t max_learn = ShenandoahLearningSteps;
 129   if (_gc_times_learned < max_learn) {
 130     size_t init_threshold = capacity / 100 * ShenandoahInitFreeThreshold;
 131     if (available < init_threshold) {
 132       log_info(gc)("Trigger: Learning " SIZE_FORMAT " of " SIZE_FORMAT ". Free (" SIZE_FORMAT "M) is below initial threshold (" SIZE_FORMAT "M)",
 133                    _gc_times_learned + 1, max_learn, available / M, init_threshold / M);


 144   size_t spike_headroom = capacity / 100 * ShenandoahAllocSpikeFactor;
 145   size_t penalties      = capacity / 100 * _gc_time_penalties;
 146 
 147   allocation_headroom -= MIN2(allocation_headroom, spike_headroom);
 148   allocation_headroom -= MIN2(allocation_headroom, penalties);
 149 
 150   // TODO: Allocation rate is way too averaged to be useful during state changes
 151 
 152   double average_gc = _gc_time_history->avg();
 153   double time_since_last = time_since_last_gc();
 154   double allocation_rate = heap->bytes_allocated_since_gc_start() / time_since_last;
 155 
 156   if (average_gc > allocation_headroom / allocation_rate) {
 157     log_info(gc)("Trigger: Average GC time (%.2f ms) is above the time for allocation rate (%.2f MB/s) to deplete free headroom (" SIZE_FORMAT "M)",
 158                  average_gc * 1000, allocation_rate / M, allocation_headroom / M);
 159     log_info(gc, ergo)("Free headroom: " SIZE_FORMAT "M (free) - " SIZE_FORMAT "M (spike) - " SIZE_FORMAT "M (penalties) = " SIZE_FORMAT "M",
 160                        available / M, spike_headroom / M, penalties / M, allocation_headroom / M);
 161     return true;
 162   }
 163 
 164   return ShenandoahHeuristics::should_start_gc();
 165 }
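
The final trigger above compares the average GC time against the time the application needs to exhaust the remaining headroom at its current allocation rate. A minimal sketch of that comparison with made-up numbers (the real code feeds it from _gc_time_history, the free set, and the spike/penalty adjustments):

#include <cstddef>
#include <cstdio>

// Illustration only: the headroom-depletion comparison with hypothetical inputs.
static bool should_trigger(double average_gc_secs, double alloc_rate_bytes_per_sec,
                           size_t headroom_bytes) {
  // Trigger when an average cycle lasts longer than the time it takes the
  // application to burn through the remaining headroom.
  return average_gc_secs > (double)headroom_bytes / alloc_rate_bytes_per_sec;
}

int main() {
  // 0.3 s average cycle, 400 MB/s allocation rate, 100 MB of headroom:
  // the headroom is gone in 0.25 s, so the next cycle has to start now.
  bool trigger = should_trigger(0.3, 400.0 * 1024 * 1024, (size_t)100 * 1024 * 1024);
  printf("trigger: %s\n", trigger ? "yes" : "no");
  return 0;
}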
 166 
 167 bool ShenandoahAdaptiveHeuristics::should_start_update_refs() {
 168   if (! _update_refs_adaptive) {
 169     return _update_refs_early;
 170   }
 171 
 172   double cycle_gap_avg = _cycle_gap_history->avg();
 173   double conc_mark_avg = _conc_mark_duration_history->avg();
 174   double conc_uprefs_avg = _conc_uprefs_duration_history->avg();
 175 
 176   if (_update_refs_early) {
 177     double threshold = ShenandoahMergeUpdateRefsMinGap / 100.0;
 178     if (conc_mark_avg + conc_uprefs_avg > cycle_gap_avg * threshold) {
 179       _update_refs_early = false;
 180     }
 181   } else {
 182     double threshold = ShenandoahMergeUpdateRefsMaxGap / 100.0;
 183     if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) {
 184       _update_refs_early = true;
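
should_start_update_refs above toggles _update_refs_early based on how large a share of the average cycle gap the concurrent mark and update-refs phases consume, using different thresholds for switching off and back on. A minimal sketch of that toggle as a pure function, with plain percentage parameters standing in for the ShenandoahMergeUpdateRefsMinGap / ShenandoahMergeUpdateRefsMaxGap flags:

// Illustration only: the same toggle over the averaged inputs.
static bool next_update_refs_early(bool update_refs_early,
                                   double cycle_gap_avg,
                                   double conc_mark_avg,
                                   double conc_uprefs_avg,
                                   double min_gap_pct,   // stands in for ShenandoahMergeUpdateRefsMinGap
                                   double max_gap_pct) { // stands in for ShenandoahMergeUpdateRefsMaxGap
  double busy = conc_mark_avg + conc_uprefs_avg;
  if (update_refs_early) {
    // Concurrent phases take too large a share of the cycle gap: stop updating refs early.
    if (busy > cycle_gap_avg * (min_gap_pct / 100.0)) {
      return false;
    }
  } else {
    // Phases are cheap again relative to the gap: go back to updating refs early.
    if (busy < cycle_gap_avg * (max_gap_pct / 100.0)) {
      return true;
    }
  }
  return update_refs_early;
}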