
src/share/vm/gc/shenandoah/shenandoahCollectorPolicy.cpp

rev 13068 : [mq]: partial.patch

Old file (before partial.patch):
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "gc/shared/gcPolicyCounters.hpp"
  25 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  26 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"

  29 #include "gc/shenandoah/shenandoahPhaseTimes.hpp"
  30 #include "runtime/os.hpp"
  31 
  32 class ShenandoahHeuristics : public CHeapObj<mtGC> {
  33 
  34   NumberSeq _allocation_rate_bytes;
  35   NumberSeq _reclamation_rate_bytes;
  36 
  37   size_t _bytes_allocated_since_CM;
  38   size_t _bytes_reclaimed_this_cycle;
  39 
  40 protected:
  41   typedef struct {
  42     size_t region_number;
  43     size_t garbage;
  44   } RegionGarbage;
  45 
  46   static int compare_by_garbage(RegionGarbage a, RegionGarbage b) {
  47     if (a.garbage > b.garbage)
  48       return -1;
  49     else if (a.garbage < b.garbage)
  50       return 1;
  51     else return 0;
  52   }
  53 
  54   RegionGarbage* _region_garbage;
  55   size_t _region_garbage_size;
  56 
  57   size_t _bytes_allocated_start_CM;
  58   size_t _bytes_allocated_during_CM;
  59 


  60   uint _cancelled_cm_cycles_in_a_row;
  61   uint _successful_cm_cycles_in_a_row;
  62 
  63   size_t _bytes_in_cset;
  64 
  65 public:
  66 
  67   ShenandoahHeuristics();
  68   ~ShenandoahHeuristics();
  69 
  70   void record_bytes_allocated(size_t bytes);
  71   void record_bytes_reclaimed(size_t bytes);
  72   void record_bytes_start_CM(size_t bytes);
  73   void record_bytes_end_CM(size_t bytes);
  74 








  75   size_t bytes_in_cset() const { return _bytes_in_cset; }
  76 
  77   virtual void print_thresholds() {
  78   }
  79 
  80   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0;
  81 




  82   virtual bool handover_cancelled_marking() {
  83     return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold;
  84   }
  85 
  86   virtual void record_cm_cancelled() {
  87     _cancelled_cm_cycles_in_a_row++;
  88     _successful_cm_cycles_in_a_row = 0;
  89   }
  90 
  91   virtual void record_cm_success() {
  92     _cancelled_cm_cycles_in_a_row = 0;
  93     _successful_cm_cycles_in_a_row++;
  94   }
  95 
  96   virtual void record_full_gc() {
  97     _bytes_in_cset = 0;
  98   }
  99 
 100   virtual void start_choose_collection_set() {
 101   }


 117     if (ShenandoahUnloadClassesFrequency == 0) return false;
 118     size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
 119     // Unload classes every Nth GC cycle.
 120     // This should not happen in the same cycle as process_references to amortize costs.
 121     // Offsetting by one is enough to break the rendezvous when periods are equal.
 122     // When periods are not equal, offsetting by one is just as good as any other guess.
 123     return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
 124   }
 125 
 126   virtual bool needs_regions_sorted_by_garbage() {
 127     // Most of them do not.
 128     return false;
 129   }
 130 };
 131 
 132 ShenandoahHeuristics::ShenandoahHeuristics() :
 133   _bytes_allocated_since_CM(0),
 134   _bytes_reclaimed_this_cycle(0),
 135   _bytes_allocated_start_CM(0),
 136   _bytes_allocated_during_CM(0),

 137   _bytes_in_cset(0),
 138   _cancelled_cm_cycles_in_a_row(0),
 139   _successful_cm_cycles_in_a_row(0),
 140   _region_garbage(NULL),
 141   _region_garbage_size(0)
 142 {
 143 }
 144 
 145 ShenandoahHeuristics::~ShenandoahHeuristics() {
 146   if (_region_garbage != NULL) {
 147     FREE_C_HEAP_ARRAY(RegionGarbage, _region_garbage);
 148   }
 149 }
 150 
 151 void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
 152   start_choose_collection_set();
 153 
 154   ShenandoahHeap* heap = ShenandoahHeap::heap();
 155 
 156   // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.


 257 void ShenandoahCollectorPolicy::record_workers_end(TimingPhase phase) {
 258   if (phase != _num_phases) {
 259     for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
 260       double t = _phase_times->average(i);
 261       _timing_data[phase + i]._secs.add(t);
 262     }
 263   }
 264 }
 265 
 266 void ShenandoahCollectorPolicy::record_phase_start(TimingPhase phase) {
 267   _timing_data[phase]._start = os::elapsedTime();
 268 
 269 }
 270 
 271 void ShenandoahCollectorPolicy::record_phase_end(TimingPhase phase) {
 272   double end = os::elapsedTime();
 273   double elapsed = end - _timing_data[phase]._start;
 274   _timing_data[phase]._secs.add(elapsed);
 275 }
 276 








 277 void ShenandoahCollectorPolicy::report_concgc_cancelled() {
 278 }
 279 
 280 void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
 281   _bytes_allocated_since_CM = bytes;
 282   _bytes_allocated_start_CM = bytes;
 283   _allocation_rate_bytes.add(bytes);
 284 }
 285 
 286 void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
 287   _bytes_reclaimed_this_cycle = bytes;
 288   _reclamation_rate_bytes.add(bytes);
 289 }
 290 
 291 void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
 292   _bytes_allocated_start_CM = bytes;
 293 }
 294 
 295 void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
 296   _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)


 615       int cs_heap_region_number = cs_heap_region->region_number();
 616       for (int i = 0; i < num; i++) {
 617         if (connections[i * num + cs_heap_region_number] > 0) {
 618           ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
 619           if (maybe_add_heap_region(candidate, collection_set)) {
 620             log_develop_trace(gc)
 621               ("Adding region %d which is pointed to by region %d\n", i, cs_heap_region_number);
 622           }
 623         }
 624       }
 625     }
 626     _max_live_data = MAX2(_max_live_data, collection_set->live_data());
 627     collection_set->print();
 628   }
 629 
 630   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 631     assert(false, "Shouldn't get here");
 632     return false;
 633   }
 634 };
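
A minimal standalone sketch of the connections-matrix indexing used above: connections is treated as a flat num x num array in row-major order, and connections[i * num + j] > 0 is read as "region i is pointed to by region j", matching the trace message in the loop. The region data and sizes below are made up for illustration and are not part of the reviewed code.

  #include <cstdio>

  int main() {
    const int num = 3;
    // connections[i * num + j] > 0: region i is pointed to by region j
    int connections[num * num] = {
      0, 2, 0,   // region 0 is pointed to by region 1
      0, 0, 0,   // region 1 is pointed to by nothing
      5, 0, 0    // region 2 is pointed to by region 0
    };
    int cs_heap_region_number = 0;  // a region already chosen for the collection set
    for (int i = 0; i < num; i++) {
      if (connections[i * num + cs_heap_region_number] > 0) {
        printf("region %d is pointed to by region %d; candidate for the collection set\n",
               i, cs_heap_region_number);
      }
    }
    return 0;
  }
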


























 635 
 636 ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
 637   _cycle_counter(0),
 638   _successful_cm(0),
 639   _degenerated_cm(0)
 640 {
 641 
 642   ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());
 643 
 644   initialize_all();
 645 
 646   _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
 647   _stw_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
 648   _conc_timer = new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer();
 649   _user_requested_gcs = 0;
 650   _allocation_failure_gcs = 0;
 651   _conc_gc_aborted = false;
 652 
 653   _phase_names[total_pause]                     = "Total Pauses (net)";
 654   _phase_names[total_pause_gross]               = "Total Pauses (gross)";


 729       log_info(gc, init)("Shenandoah heuristics: aggressive");
 730       _heuristics = new AggressiveHeuristics();
 731     } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
 732       log_info(gc, init)("Shenandoah heuristics: dynamic");
 733       _heuristics = new DynamicHeuristics();
 734     } else if (strcmp(ShenandoahGCHeuristics, "global") == 0) {
 735       log_info(gc, init)("Shenandoah heuristics: global");
 736       _heuristics = new GlobalHeuristics();
 737     } else if (strcmp(ShenandoahGCHeuristics, "ratio") == 0) {
 738       log_info(gc, init)("Shenandoah heuristics: ratio");
 739       _heuristics = new RatioHeuristics();
 740     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 741       log_info(gc, init)("Shenandoah heuristics: adaptive");
 742       _heuristics = new AdaptiveHeuristics();
 743     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 744       log_info(gc, init)("Shenandoah heuristics: passive");
 745       _heuristics = new PassiveHeuristics();
 746     } else if (strcmp(ShenandoahGCHeuristics, "connections") == 0) {
 747       log_info(gc, init)("Shenandoah heuristics: connections");
 748       _heuristics = new ConnectionHeuristics();



 749     } else {
 750       vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
 751     }
 752     _heuristics->print_thresholds();
 753   } else {
 754       ShouldNotReachHere();
 755   }
 756   _phase_times = new ShenandoahPhaseTimes(MAX2(ConcGCThreads, ParallelGCThreads));
 757 }
 758 
 759 ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
 760   return this;
 761 }
 762 
 763 BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
 764   return BarrierSet::ShenandoahBarrierSet;
 765 }
 766 
 767 HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
 768                                                        bool is_tlab,


1048 
1049   // Calculation based on live set
1050   size_t live_data = 0;
1051   ShenandoahHeap* heap = ShenandoahHeap::heap();
1052   if (full_gc) {
1053     ShenandoahHeapRegionSet* regions = heap->regions();
1054     for (size_t index = 0; index < regions->active_regions(); index ++) {
1055       live_data += regions->get_fast(index)->get_live_data_bytes();
1056     }
1057   } else {
1058     ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)heap->collector_policy();
1059     live_data = policy->_heuristics->bytes_in_cset();
1060   }
1061 
1062   uint active_workers_by_liveset = calc_workers_for_live_set(live_data);
1063   return calc_default_active_workers(total_workers,
1064       (total_workers > 1 ? 2 : 1), active_workers,
1065       application_workers, 0, active_workers_by_liveset);
1066 }
1067 




New file (with partial.patch applied):
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "gc/shared/gcPolicyCounters.hpp"
  25 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  26 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  29 #include "gc/shenandoah/shenandoahPartialGC.hpp"
  30 #include "gc/shenandoah/shenandoahPhaseTimes.hpp"
  31 #include "runtime/os.hpp"
  32 
  33 class ShenandoahHeuristics : public CHeapObj<mtGC> {
  34 
  35   NumberSeq _allocation_rate_bytes;
  36   NumberSeq _reclamation_rate_bytes;
  37 
  38   size_t _bytes_allocated_since_CM;
  39   size_t _bytes_reclaimed_this_cycle;
  40 
  41 protected:
  42   typedef struct {
  43     size_t region_number;
  44     size_t garbage;
  45   } RegionGarbage;
  46 
  47   static int compare_by_garbage(RegionGarbage a, RegionGarbage b) {
  48     if (a.garbage > b.garbage)
  49       return -1;
   50     else if (a.garbage < b.garbage)
  51       return 1;
  52     else return 0;
  53   }
  54 
  55   RegionGarbage* _region_garbage;
  56   size_t _region_garbage_size;
  57 
  58   size_t _bytes_allocated_start_CM;
  59   size_t _bytes_allocated_during_CM;
  60 
  61   size_t _bytes_allocated_after_last_gc;
  62 
  63   uint _cancelled_cm_cycles_in_a_row;
  64   uint _successful_cm_cycles_in_a_row;
  65 
  66   size_t _bytes_in_cset;
  67 
  68 public:
  69 
  70   ShenandoahHeuristics();
  71   ~ShenandoahHeuristics();
  72 
  73   void record_bytes_allocated(size_t bytes);
  74   void record_bytes_reclaimed(size_t bytes);
  75   void record_bytes_start_CM(size_t bytes);
  76   void record_bytes_end_CM(size_t bytes);
  77 
  78   void record_gc_start() {
  79     // Do nothing.
  80   }
  81 
  82   void record_gc_end() {
  83     _bytes_allocated_after_last_gc = ShenandoahHeap::heap()->used();
  84   }
  85 
  86   size_t bytes_in_cset() const { return _bytes_in_cset; }
  87 
  88   virtual void print_thresholds() {
  89   }
  90 
  91   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0;
  92 
  93   virtual bool should_start_partial_gc() {
  94     return false;
  95   }
  96 
  97   virtual bool handover_cancelled_marking() {
  98     return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold;
  99   }
 100 
 101   virtual void record_cm_cancelled() {
 102     _cancelled_cm_cycles_in_a_row++;
 103     _successful_cm_cycles_in_a_row = 0;
 104   }
 105 
 106   virtual void record_cm_success() {
 107     _cancelled_cm_cycles_in_a_row = 0;
 108     _successful_cm_cycles_in_a_row++;
 109   }
 110 
 111   virtual void record_full_gc() {
 112     _bytes_in_cset = 0;
 113   }
 114 
 115   virtual void start_choose_collection_set() {
 116   }


 132     if (ShenandoahUnloadClassesFrequency == 0) return false;
 133     size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
 134     // Unload classes every Nth GC cycle.
 135     // This should not happen in the same cycle as process_references to amortize costs.
 136     // Offsetting by one is enough to break the rendezvous when periods are equal.
 137     // When periods are not equal, offsetting by one is just as good as any other guess.
 138     return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
 139   }
 140 
 141   virtual bool needs_regions_sorted_by_garbage() {
 142     // Most of them do not.
 143     return false;
 144   }
 145 };
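
The class-unloading cadence in the heuristics above fires every ShenandoahUnloadClassesFrequency-th cycle, offset by one so that it avoids landing on the same cycles as reference processing when both use the same period. A minimal sketch of that offset; the reference-processing cadence (cycle % period == 0) is a hypothetical stand-in for logic not shown in this hunk, only the unloading test mirrors the real code.

  #include <cstdio>
  #include <cstddef>

  int main() {
    const size_t unload_period  = 4;  // stands in for ShenandoahUnloadClassesFrequency
    const size_t refproc_period = 4;  // hypothetical, equal period for reference processing
    for (size_t cycle = 0; cycle < 8; cycle++) {
      bool unload  = (cycle + 1) % unload_period == 0;  // fires on cycles 3, 7, ...
      bool refproc = cycle % refproc_period == 0;       // fires on cycles 0, 4, ...
      printf("cycle %zu: unload=%d refproc=%d\n", cycle, (int)unload, (int)refproc);
    }
    return 0;  // with equal periods the two never fire in the same cycle
  }
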
 146 
 147 ShenandoahHeuristics::ShenandoahHeuristics() :
 148   _bytes_allocated_since_CM(0),
 149   _bytes_reclaimed_this_cycle(0),
 150   _bytes_allocated_start_CM(0),
 151   _bytes_allocated_during_CM(0),
 152   _bytes_allocated_after_last_gc(0),
 153   _bytes_in_cset(0),
 154   _cancelled_cm_cycles_in_a_row(0),
 155   _successful_cm_cycles_in_a_row(0),
 156   _region_garbage(NULL),
 157   _region_garbage_size(0)
 158 {
 159 }
 160 
 161 ShenandoahHeuristics::~ShenandoahHeuristics() {
 162   if (_region_garbage != NULL) {
 163     FREE_C_HEAP_ARRAY(RegionGarbage, _region_garbage);
 164   }
 165 }
 166 
 167 void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
 168   start_choose_collection_set();
 169 
 170   ShenandoahHeap* heap = ShenandoahHeap::heap();
 171 
 172   // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.


 273 void ShenandoahCollectorPolicy::record_workers_end(TimingPhase phase) {
 274   if (phase != _num_phases) {
 275     for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
 276       double t = _phase_times->average(i);
 277       _timing_data[phase + i]._secs.add(t);
 278     }
 279   }
 280 }
 281 
 282 void ShenandoahCollectorPolicy::record_phase_start(TimingPhase phase) {
 283   _timing_data[phase]._start = os::elapsedTime();
 284 
 285 }
 286 
 287 void ShenandoahCollectorPolicy::record_phase_end(TimingPhase phase) {
 288   double end = os::elapsedTime();
 289   double elapsed = end - _timing_data[phase]._start;
 290   _timing_data[phase]._secs.add(elapsed);
 291 }
 292 
 293 void ShenandoahCollectorPolicy::record_gc_start() {
 294   _heuristics->record_gc_start();
 295 }
 296 
 297 void ShenandoahCollectorPolicy::record_gc_end() {
 298   _heuristics->record_gc_end();
 299 }
 300 
 301 void ShenandoahCollectorPolicy::report_concgc_cancelled() {
 302 }
 303 
 304 void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
 305   _bytes_allocated_since_CM = bytes;
 306   _bytes_allocated_start_CM = bytes;
 307   _allocation_rate_bytes.add(bytes);
 308 }
 309 
 310 void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
 311   _bytes_reclaimed_this_cycle = bytes;
 312   _reclamation_rate_bytes.add(bytes);
 313 }
 314 
 315 void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
 316   _bytes_allocated_start_CM = bytes;
 317 }
 318 
 319 void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
 320   _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)


 639       int cs_heap_region_number = cs_heap_region->region_number();
 640       for (int i = 0; i < num; i++) {
 641         if (connections[i * num + cs_heap_region_number] > 0) {
 642           ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
 643           if (maybe_add_heap_region(candidate, collection_set)) {
 644             log_develop_trace(gc)
 645               ("Adding region %d which is pointed to by region %d\n", i, cs_heap_region_number);
 646           }
 647         }
 648       }
 649     }
 650     _max_live_data = MAX2(_max_live_data, collection_set->live_data());
 651     collection_set->print();
 652   }
 653 
 654   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 655     assert(false, "Shouldn't get here");
 656     return false;
 657   }
 658 };
 659 class PartialHeuristics : public AdaptiveHeuristics {
 660 public:
 661   PartialHeuristics() : AdaptiveHeuristics() {
 662     if (FLAG_IS_DEFAULT(ShenandoahAllocationThreshold)) {
 663       FLAG_SET_DEFAULT(ShenandoahAllocationThreshold, 5);
 664     }
 665     FLAG_SET_DEFAULT(UseShenandoahMatrix, true);
 666     // TODO: Disable this optimization for now, as it also requires the matrix barriers.
 667     FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
 668   }
 669 
 670   virtual ~PartialHeuristics() {}
 671 
 672   bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 673     // Never do concurrent GCs.
 674     return false;
 675   }
 676 
 677   bool should_start_partial_gc() {
 678     ShenandoahHeap* heap = ShenandoahHeap::heap();
 679     size_t capacity = heap->capacity();
 680 
 681     size_t used = heap->used();
 682     return (used - _bytes_allocated_after_last_gc) * 100 / capacity > ShenandoahAllocationThreshold;
 683   }
 684 };
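
PartialHeuristics above triggers a partial GC once the bytes allocated since the last GC exceed ShenandoahAllocationThreshold percent of heap capacity, using the usage snapshot taken by record_gc_end(). A worked instance with hypothetical numbers; 5 is the default the constructor above installs for the threshold when the flag is left at its default.

  #include <cstdio>
  #include <cstddef>

  int main() {
    const size_t capacity_mb = 1024;  // heap capacity
    const size_t used_mb     = 900;   // current heap usage
    const size_t after_gc_mb = 850;   // usage recorded at the end of the last GC
    const size_t threshold   = 5;     // percent of capacity allocated since the last GC
    size_t allocated_pct = (used_mb - after_gc_mb) * 100 / capacity_mb;  // = 4
    printf("allocated since last GC: %zu%% of capacity -> start partial GC: %s\n",
           allocated_pct, (allocated_pct > threshold) ? "yes" : "no");   // prints "no"
    return 0;
  }
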
 685 
 686 ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
 687   _cycle_counter(0),
 688   _successful_cm(0),
 689   _degenerated_cm(0)
 690 {
 691 
 692   ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());
 693 
 694   initialize_all();
 695 
 696   _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
 697   _stw_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
 698   _conc_timer = new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer();
 699   _user_requested_gcs = 0;
 700   _allocation_failure_gcs = 0;
 701   _conc_gc_aborted = false;
 702 
 703   _phase_names[total_pause]                     = "Total Pauses (net)";
 704   _phase_names[total_pause_gross]               = "Total Pauses (gross)";


 779       log_info(gc, init)("Shenandoah heuristics: aggressive");
 780       _heuristics = new AggressiveHeuristics();
 781     } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
 782       log_info(gc, init)("Shenandoah heuristics: dynamic");
 783       _heuristics = new DynamicHeuristics();
 784     } else if (strcmp(ShenandoahGCHeuristics, "global") == 0) {
 785       log_info(gc, init)("Shenandoah heuristics: global");
 786       _heuristics = new GlobalHeuristics();
 787     } else if (strcmp(ShenandoahGCHeuristics, "ratio") == 0) {
 788       log_info(gc, init)("Shenandoah heuristics: ratio");
 789       _heuristics = new RatioHeuristics();
 790     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 791       log_info(gc, init)("Shenandoah heuristics: adaptive");
 792       _heuristics = new AdaptiveHeuristics();
 793     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 794       log_info(gc, init)("Shenandoah heuristics: passive");
 795       _heuristics = new PassiveHeuristics();
 796     } else if (strcmp(ShenandoahGCHeuristics, "connections") == 0) {
 797       log_info(gc, init)("Shenandoah heuristics: connections");
 798       _heuristics = new ConnectionHeuristics();
 799     } else if (strcmp(ShenandoahGCHeuristics, "partial") == 0) {
 800       log_info(gc, init)("Shenandoah heuristics: partial GC");
 801       _heuristics = new PartialHeuristics();
 802     } else {
 803       vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
 804     }
 805     _heuristics->print_thresholds();
 806   } else {
 807       ShouldNotReachHere();
 808   }
 809   _phase_times = new ShenandoahPhaseTimes(MAX2(ConcGCThreads, ParallelGCThreads));
 810 }
 811 
 812 ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
 813   return this;
 814 }
 815 
 816 BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
 817   return BarrierSet::ShenandoahBarrierSet;
 818 }
 819 
 820 HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
 821                                                        bool is_tlab,


1101 
1102   // Calculation based on live set
1103   size_t live_data = 0;
1104   ShenandoahHeap* heap = ShenandoahHeap::heap();
1105   if (full_gc) {
1106     ShenandoahHeapRegionSet* regions = heap->regions();
1107     for (size_t index = 0; index < regions->active_regions(); index ++) {
1108       live_data += regions->get_fast(index)->get_live_data_bytes();
1109     }
1110   } else {
1111     ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)heap->collector_policy();
1112     live_data = policy->_heuristics->bytes_in_cset();
1113   }
1114 
1115   uint active_workers_by_liveset = calc_workers_for_live_set(live_data);
1116   return calc_default_active_workers(total_workers,
1117       (total_workers > 1 ? 2 : 1), active_workers,
1118       application_workers, 0, active_workers_by_liveset);
1119 }
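
The worker sizing above estimates live data two ways: a full GC sums live bytes over all active regions, while other collections reuse the heuristics' running count of bytes in the collection set. A minimal sketch of just that estimate, with made-up region sizes; the actual calc_workers_for_live_set() and calc_default_active_workers() are not reproduced here.

  #include <cstdio>
  #include <cstddef>

  int main() {
    const bool   full_gc       = false;
    const size_t region_live[] = { 8u << 20, 0, 32u << 20 };  // live bytes per active region
    const size_t bytes_in_cset = 24u << 20;                   // tracked by the heuristics
    size_t live_data = 0;
    if (full_gc) {
      for (size_t i = 0; i < sizeof(region_live) / sizeof(region_live[0]); i++) {
        live_data += region_live[i];
      }
    } else {
      live_data = bytes_in_cset;                              // non-full-GC path
    }
    printf("live data estimate: %zu MB\n", live_data >> 20);  // prints 24
    return 0;
  }
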
1120 
1121 bool ShenandoahCollectorPolicy::should_start_partial_gc() {
1122   return _heuristics->should_start_partial_gc();
1123 }