1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "gc/shared/gcPolicyCounters.hpp"
  25 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  26 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  29 #include "gc/shenandoah/shenandoahPartialGC.hpp"
  30 #include "gc/shenandoah/shenandoahPhaseTimes.hpp"
  31 #include "runtime/os.hpp"
  32 
  33 class ShenandoahHeuristics : public CHeapObj<mtGC> {
  34 
  35   NumberSeq _allocation_rate_bytes;
  36   NumberSeq _reclamation_rate_bytes;
  37 
  38   size_t _bytes_allocated_since_CM;
  39   size_t _bytes_reclaimed_this_cycle;
  40 
  41 protected:
  42   typedef struct {
  43     size_t region_number;
  44     size_t garbage;
  45   } RegionGarbage;
  46 
  47   static int compare_by_garbage(RegionGarbage a, RegionGarbage b) {
  48     if (a.garbage > b.garbage)
  49       return -1;
  50     else if (b.garbage < a.garbage)
  51       return 1;
  52     else return 0;
  53   }
  54 
  55   RegionGarbage* _region_garbage;
  56   size_t _region_garbage_size;
  57 
  58   size_t _bytes_allocated_start_CM;
  59   size_t _bytes_allocated_during_CM;
  60 
  61   size_t _bytes_allocated_after_last_gc;
  62 
  63   uint _cancelled_cm_cycles_in_a_row;
  64   uint _successful_cm_cycles_in_a_row;
  65 
  66   size_t _bytes_in_cset;
  67 
  68 public:
  69 
  70   ShenandoahHeuristics();
  71   ~ShenandoahHeuristics();
  72 
  73   void record_bytes_allocated(size_t bytes);
  74   void record_bytes_reclaimed(size_t bytes);
  75   void record_bytes_start_CM(size_t bytes);
  76   void record_bytes_end_CM(size_t bytes);
  77 
  78   void record_gc_start() {
  79     // Do nothing.
  80   }
  81 
  82   void record_gc_end() {
  83     _bytes_allocated_after_last_gc = ShenandoahHeap::heap()->used();
  84   }
  85 
  86   size_t bytes_in_cset() const { return _bytes_in_cset; }
  87 
  88   virtual void print_thresholds() {
  89   }
  90 
  91   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0;
  92 
  93   virtual bool should_start_partial_gc() {
  94     return false;
  95   }
  96 
  97   virtual bool handover_cancelled_marking() {
  98     return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold;
  99   }
 100 
 101   virtual void record_cm_cancelled() {
 102     _cancelled_cm_cycles_in_a_row++;
 103     _successful_cm_cycles_in_a_row = 0;
 104   }
 105 
 106   virtual void record_cm_success() {
 107     _cancelled_cm_cycles_in_a_row = 0;
 108     _successful_cm_cycles_in_a_row++;
 109   }
 110 
 111   virtual void record_full_gc() {
 112     _bytes_in_cset = 0;
 113   }
 114 
 115   virtual void start_choose_collection_set() {
 116   }
 117   virtual void end_choose_collection_set() {
 118   }
 119   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) = 0;
 120 
 121   virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections=NULL);
 122   virtual void choose_free_set(ShenandoahFreeSet* free_set);
 123 
 124   virtual bool process_references() {
 125     if (ShenandoahRefProcFrequency == 0) return false;
 126     size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
 127     // Process references every Nth GC cycle.
 128     return cycle % ShenandoahRefProcFrequency == 0;
 129   }
 130 
 131   virtual bool unload_classes() {
 132     if (ShenandoahUnloadClassesFrequency == 0) return false;
 133     size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
 134     // Unload classes every Nth GC cycle.
 135     // This should not happen in the same cycle as process_references to amortize costs.
 136     // Offsetting by one is enough to break the rendezvous when periods are equal.
 137     // When periods are not equal, offsetting by one is just as good as any other guess.
 138     return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
 139   }
 140 
 141   virtual bool needs_regions_sorted_by_garbage() {
 142     // Most of them do not.
 143     return false;
 144   }
 145 };
 146 
 147 ShenandoahHeuristics::ShenandoahHeuristics() :
 148   _bytes_allocated_since_CM(0),
 149   _bytes_reclaimed_this_cycle(0),
 150   _bytes_allocated_start_CM(0),
 151   _bytes_allocated_during_CM(0),
 152   _bytes_allocated_after_last_gc(0),
 153   _bytes_in_cset(0),
 154   _cancelled_cm_cycles_in_a_row(0),
 155   _successful_cm_cycles_in_a_row(0),
 156   _region_garbage(NULL),
 157   _region_garbage_size(0)
 158 {
 159 }
 160 
 161 ShenandoahHeuristics::~ShenandoahHeuristics() {
 162   if (_region_garbage != NULL) {
 163     FREE_C_HEAP_ARRAY(RegionGarbage, _region_garbage);
 164   }
 165 }
 166 
 167 void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
 168   start_choose_collection_set();
 169 
 170   ShenandoahHeap* heap = ShenandoahHeap::heap();
 171 
 172   // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.
 173 
 174   ShenandoahHeapRegionSet* regions = heap->regions();
 175   size_t active = regions->active_regions();
 176 
 177   RegionGarbage* candidates = _region_garbage;
 178   if (candidates == NULL) {
 179     candidates = NEW_C_HEAP_ARRAY(RegionGarbage, active, mtGC);
 180     _region_garbage_size = active;
 181   } else if (_region_garbage_size < active) {
 182     REALLOC_C_HEAP_ARRAY(RegionGarbage, _region_garbage, active, mtGC);
 183     _region_garbage_size = active;
 184   }
 185 
 186   size_t cand_idx = 0;
 187   _bytes_in_cset = 0;
 188 
 189   size_t immediate_garbage = 0;
 190   size_t immediate_regions = 0;
 191   for (size_t i = 0; i < active; i++) {
 192     ShenandoahHeapRegion* region = regions->get(i);
 193 
 194     if (! region->is_humongous() && ! region->is_pinned()) {
 195       if ((! region->is_empty()) && ! region->has_live()) {
 196         // We can recycle it right away and put it in the free set.
 197         immediate_regions++;
 198         immediate_garbage += region->garbage();
 199         heap->decrease_used(region->used());
 200         region->recycle();
 201         log_develop_trace(gc)("Choose region " SIZE_FORMAT " for immediate reclaim with garbage = " SIZE_FORMAT
 202                               " and live = " SIZE_FORMAT "\n",
 203                               region->region_number(), region->garbage(), region->get_live_data_bytes());
 204       } else {
 205         // This is our candidate for later consideration.
 206         candidates[cand_idx].region_number = region->region_number();
 207         candidates[cand_idx].garbage = region->garbage();
 208         cand_idx++;
 209       }
 210     } else {
 211       assert(region->has_live() || region->is_empty() || region->is_pinned() || region->is_humongous(), "check rejected");
 212       log_develop_trace(gc)("Rejected region " SIZE_FORMAT " with garbage = " SIZE_FORMAT
 213                             " and live = " SIZE_FORMAT "\n",
 214                             region->region_number(), region->garbage(), region->get_live_data_bytes());
 215     }
 216   }
 217 
 218   // Step 2. Process the remanining candidates, if any.
 219 
 220   if (cand_idx > 0) {
 221     if (needs_regions_sorted_by_garbage()) {
 222       QuickSort::sort<RegionGarbage>(candidates, cand_idx, compare_by_garbage, false);
 223     }
 224 
 225     for (size_t i = 0; i < cand_idx; i++) {
 226       ShenandoahHeapRegion *region = regions->get_fast(candidates[i].region_number);
 227       if (region_in_collection_set(region, immediate_garbage)) {
 228         log_develop_trace(gc)("Choose region " SIZE_FORMAT " with garbage = " SIZE_FORMAT
 229                                       " and live = " SIZE_FORMAT "\n",
 230                               region->region_number(), region->garbage(), region->get_live_data_bytes());
 231         collection_set->add_region(region);
 232         region->set_in_collection_set(true);
 233         _bytes_in_cset += region->used();
 234       }
 235     }
 236   }
 237 
 238   end_choose_collection_set();
 239 
 240   size_t total_garbage = heap->garbage();
 241   log_debug(gc)("Total Garbage: "SIZE_FORMAT, total_garbage);
 242   log_debug(gc)("Immediate Garbage: "SIZE_FORMAT, immediate_garbage);
 243   log_debug(gc)("Immediate Garbage regions: "SIZE_FORMAT, immediate_regions);
 244   log_debug(gc)("Garbage to be collected: "SIZE_FORMAT, collection_set->garbage());
 245   log_debug(gc)("Objects to be evacuated: "SIZE_FORMAT, collection_set->live_data());
 246   log_debug(gc)("Live / Garbage ratio: "SIZE_FORMAT"%%", collection_set->live_data() * 100 / MAX2(collection_set->garbage(), 1UL));
 247   log_debug(gc)("Collected-Garbage ratio / Total-garbage: "SIZE_FORMAT"%%", collection_set->garbage() * 100 / MAX2(total_garbage, 1UL));
 248 }
 249 
 250 void ShenandoahHeuristics::choose_free_set(ShenandoahFreeSet* free_set) {
 251 
 252   ShenandoahHeapRegionSet* ordered_regions = ShenandoahHeap::heap()->regions();
 253   size_t i = 0;
 254   size_t end = ordered_regions->active_regions();
 255 
 256   ShenandoahHeap* heap = ShenandoahHeap::heap();
 257   while (i < end) {
 258     ShenandoahHeapRegion* region = ordered_regions->get(i++);
 259     if ((! heap->in_collection_set(region))
 260         && (! region->is_humongous())
 261         && (! region->is_pinned())) {
 262       free_set->add_region(region);
 263     }
 264   }
 265 }
 266 
 267 void ShenandoahCollectorPolicy::record_workers_start(TimingPhase phase) {
 268   for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
 269     _phase_times->reset(i);
 270   }
 271 }
 272 
 273 void ShenandoahCollectorPolicy::record_workers_end(TimingPhase phase) {
 274   if (phase != _num_phases) {
 275     for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
 276       double t = _phase_times->average(i);
 277       _timing_data[phase + i]._secs.add(t);
 278     }
 279   }
 280 }
 281 
// Stamp the wall-clock start time for 'phase'; paired with record_phase_end().
void ShenandoahCollectorPolicy::record_phase_start(TimingPhase phase) {
  _timing_data[phase]._start = os::elapsedTime();

}
 286 
 287 void ShenandoahCollectorPolicy::record_phase_end(TimingPhase phase) {
 288   double end = os::elapsedTime();
 289   double elapsed = end - _timing_data[phase]._start;
 290   _timing_data[phase]._secs.add(elapsed);
 291 }
 292 
// Forward GC-start bookkeeping to the active heuristics.
void ShenandoahCollectorPolicy::record_gc_start() {
  _heuristics->record_gc_start();
}
 296 
// Forward GC-end bookkeeping to the active heuristics.
void ShenandoahCollectorPolicy::record_gc_end() {
  _heuristics->record_gc_end();
}
 300 
// Hook invoked when a concurrent GC is cancelled; currently a no-op.
void ShenandoahCollectorPolicy::report_concgc_cancelled() {
}
 303 
// Record an allocation counter sample and feed the allocation rate estimator.
// NOTE(review): this overwrites (does not accumulate) both the since-CM and
// start-CM counters with the same value — presumably callers pass a running
// total; confirm against call sites.
void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
  _bytes_allocated_since_CM = bytes;
  _bytes_allocated_start_CM = bytes;
  _allocation_rate_bytes.add(bytes);
}
 309 
 310 void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
 311   _bytes_reclaimed_this_cycle = bytes;
 312   _reclamation_rate_bytes.add(bytes);
 313 }
 314 
// Snapshot the allocation counter at concurrent-mark start; used by
// record_bytes_end_CM() to compute allocation during marking.
void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
  _bytes_allocated_start_CM = bytes;
}
 318 
 319 void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
 320   _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)
 321                                                                    : bytes;
 322 }
 323 
 324 class PassiveHeuristics : public ShenandoahHeuristics {
 325 public:
 326   PassiveHeuristics() : ShenandoahHeuristics() {
 327   }
 328 
 329   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 330     return r->garbage() > 0;
 331   }
 332 
 333   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 334     // Never do concurrent GCs.
 335     return false;
 336   }
 337 
 338   virtual bool process_references() {
 339     // Randomly process refs with 50% chance.
 340     return (os::random() & 1) == 1;
 341   }
 342 
 343   virtual bool unload_classes() {
 344     // Randomly unload classes with 50% chance.
 345     return (os::random() & 1) == 1;
 346   }
 347 };
 348 
 349 class AggressiveHeuristics : public ShenandoahHeuristics {
 350 public:
 351   AggressiveHeuristics() : ShenandoahHeuristics() {
 352   }
 353 
 354   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 355     return r->garbage() > 0;
 356   }
 357 
 358   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 359     return true;
 360   }
 361 
 362   virtual bool process_references() {
 363     // Randomly process refs with 50% chance.
 364     return (os::random() & 1) == 1;
 365   }
 366 
 367   virtual bool unload_classes() {
 368     // Randomly unload classes with 50% chance.
 369     return (os::random() & 1) == 1;
 370   }
 371 };
 372 
 373 class DynamicHeuristics : public ShenandoahHeuristics {
 374 public:
 375   DynamicHeuristics() : ShenandoahHeuristics() {
 376   }
 377 
 378   void print_thresholds() {
 379     log_info(gc, init)("Shenandoah heuristics thresholds: allocation "SIZE_FORMAT", free "SIZE_FORMAT", garbage "SIZE_FORMAT,
 380                        ShenandoahAllocationThreshold,
 381                        ShenandoahFreeThreshold,
 382                        ShenandoahGarbageThreshold);
 383   }
 384 
 385   virtual ~DynamicHeuristics() {}
 386 
 387   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 388 
 389     bool shouldStartConcurrentMark = false;
 390 
 391     ShenandoahHeap* heap = ShenandoahHeap::heap();
 392     size_t free_capacity = heap->free_regions()->capacity();
 393     size_t free_used = heap->free_regions()->used();
 394     assert(free_used <= free_capacity, "must use less than capacity");
 395     size_t cset = MIN2(_bytes_in_cset, (ShenandoahCSetThreshold * capacity) / 100);
 396     size_t available =  free_capacity - free_used + cset;
 397     uintx threshold = ShenandoahFreeThreshold + ShenandoahCSetThreshold;
 398     size_t targetStartMarking = (capacity * threshold) / 100;
 399 
 400     size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
 401     if (available < targetStartMarking &&
 402         heap->bytes_allocated_since_cm() > threshold_bytes_allocated)
 403     {
 404       // Need to check that an appropriate number of regions have
 405       // been allocated since last concurrent mark too.
 406       shouldStartConcurrentMark = true;
 407     }
 408 
 409     return shouldStartConcurrentMark;
 410   }
 411 
 412   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 413     size_t threshold = ShenandoahHeapRegion::RegionSizeBytes * ShenandoahGarbageThreshold / 100;
 414     return r->garbage() > threshold;
 415   }
 416 
 417 };
 418 
 419 
 420 class AdaptiveHeuristics : public ShenandoahHeuristics {
 421 private:
 422   uintx _free_threshold;
 423   TruncatedSeq* _cset_history;
 424 
 425 public:
 426   AdaptiveHeuristics() :
 427     ShenandoahHeuristics(),
 428     _free_threshold(ShenandoahInitFreeThreshold),
 429     _cset_history(new TruncatedSeq(ShenandoahHappyCyclesThreshold)) {
 430 
 431     _cset_history->add((double) ShenandoahCSetThreshold);
 432     _cset_history->add((double) ShenandoahCSetThreshold);
 433   }
 434 
 435   virtual ~AdaptiveHeuristics() {
 436     delete _cset_history;
 437   }
 438 
 439   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 440     size_t threshold = ShenandoahHeapRegion::RegionSizeBytes * ShenandoahGarbageThreshold / 100;
 441     return r->garbage() > threshold;
 442   }
 443 
 444   virtual void record_cm_cancelled() {
 445     ShenandoahHeuristics::record_cm_cancelled();
 446     if (_free_threshold < ShenandoahMaxFreeThreshold) {
 447       _free_threshold++;
 448       log_info(gc,ergo)("increasing free threshold to: "UINTX_FORMAT, _free_threshold);
 449     }
 450   }
 451 
 452   virtual void record_cm_success() {
 453     ShenandoahHeuristics::record_cm_success();
 454     if (_successful_cm_cycles_in_a_row > ShenandoahHappyCyclesThreshold &&
 455         _free_threshold > ShenandoahMinFreeThreshold) {
 456       _free_threshold--;
 457       log_info(gc,ergo)("reducing free threshold to: "UINTX_FORMAT, _free_threshold);
 458       _successful_cm_cycles_in_a_row = 0;
 459     }
 460   }
 461 
 462   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 463     bool shouldStartConcurrentMark = false;
 464 
 465     ShenandoahHeap* heap = ShenandoahHeap::heap();
 466     size_t free_capacity = heap->free_regions()->capacity();
 467     size_t free_used = heap->free_regions()->used();
 468     assert(free_used <= free_capacity, "must use less than capacity");
 469     // size_t cset_threshold = (size_t) _cset_history->maximum();
 470     size_t cset_threshold = (size_t) _cset_history->davg();
 471     size_t cset = MIN2(_bytes_in_cset, (cset_threshold * capacity) / 100);
 472     size_t available =  free_capacity - free_used + cset;
 473     uintx factor = _free_threshold + cset_threshold;
 474     size_t targetStartMarking = (capacity * factor) / 100;
 475 
 476     size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
 477     if (available < targetStartMarking &&
 478         heap->bytes_allocated_since_cm() > threshold_bytes_allocated)
 479     {
 480       // Need to check that an appropriate number of regions have
 481       // been allocated since last concurrent mark too.
 482       shouldStartConcurrentMark = true;
 483     }
 484 
 485     if (shouldStartConcurrentMark) {
 486       log_info(gc,ergo)("predicted cset threshold: "SIZE_FORMAT, cset_threshold);
 487       log_info(gc,ergo)("Starting concurrent mark at "SIZE_FORMAT"K CSet ("SIZE_FORMAT"%%)", _bytes_in_cset / K, _bytes_in_cset * 100 / capacity);
 488       _cset_history->add((double) (_bytes_in_cset * 100 / capacity));
 489     }
 490     return shouldStartConcurrentMark;
 491   }
 492 
 493 };
 494 
 495 class GlobalHeuristics : public DynamicHeuristics {
 496 private:
 497   size_t _garbage;
 498   size_t _min_garbage;
 499 public:
 500   GlobalHeuristics() : DynamicHeuristics() {
 501     if (FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) {
 502       FLAG_SET_DEFAULT(ShenandoahGarbageThreshold, 90);
 503     }
 504   }
 505   virtual ~GlobalHeuristics() {}
 506 
 507   virtual void start_choose_collection_set() {
 508     _garbage = 0;
 509     size_t heap_garbage = ShenandoahHeap::heap()->garbage();
 510     _min_garbage =  heap_garbage * ShenandoahGarbageThreshold / 100;
 511   }
 512 
 513   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 514     if (_garbage + immediate_garbage < _min_garbage && ! r->is_empty()) {
 515       _garbage += r->garbage();
 516       return true;
 517     } else {
 518       return false;
 519     }
 520   }
 521 
 522   virtual bool needs_regions_sorted_by_garbage() {
 523     return true;
 524   }
 525 };
 526 
 527 class RatioHeuristics : public DynamicHeuristics {
 528 private:
 529   size_t _garbage;
 530   size_t _live;
 531 public:
 532   RatioHeuristics() : DynamicHeuristics() {
 533     if (FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) {
 534       FLAG_SET_DEFAULT(ShenandoahGarbageThreshold, 95);
 535     }
 536   }
 537   virtual ~RatioHeuristics() {}
 538 
 539   virtual void start_choose_collection_set() {
 540     _garbage = 0;
 541     _live = 0;
 542   }
 543 
 544   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 545     size_t min_ratio = 100 - ShenandoahGarbageThreshold;
 546     if (_live * 100 / MAX2(_garbage + immediate_garbage, 1UL) < min_ratio && ! r->is_empty()) {
 547       _garbage += r->garbage();
 548       _live += r->get_live_data_bytes();
 549       return true;
 550     } else {
 551       return false;
 552     }
 553   }
 554 
 555   virtual bool needs_regions_sorted_by_garbage() {
 556     return true;
 557   }
 558 };
 559 
// Experimental heuristics that use the inter-region connection matrix
// ('connections', an N x N counter table indexed [from * num + to]) to build
// a collection set around the single most garbage-rich region and everything
// connected to it.
class ConnectionHeuristics : public ShenandoahHeuristics {
private:
  size_t _max_live_data;          // high-water mark of live data across chosen collection sets
  double _used_threshold_factor;
  double _garbage_threshold_factor;
  double _allocation_threshold_factor;

  uintx _used_threshold;
  uintx _garbage_threshold;
  uintx _allocation_threshold;

public:
  ConnectionHeuristics() : ShenandoahHeuristics() {
    _max_live_data = 0;

    _used_threshold = 0;
    _garbage_threshold = 0;
    _allocation_threshold = 0;

    _used_threshold_factor = 0.;
    _garbage_threshold_factor = 0.1;
    _allocation_threshold_factor = 0.;
  }

  virtual ~ConnectionHeuristics() {}

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    // Trigger marking after a fixed 64M of allocation since the last cycle.
    // NOTE(review): the variable is named half_gig but holds 64M.
    size_t half_gig = 64 * 1024 * 1024;
    size_t bytes_alloc = ShenandoahHeap::heap()->bytes_allocated_since_cm();
    bool result =  bytes_alloc > half_gig;
    if (result) tty->print("Starting a concurrent mark");
    return result;
  }

  // Add hr to the collection set unless it is humongous, has no live data,
  // or is already a member. Returns true iff the region was added.
  bool maybe_add_heap_region(ShenandoahHeapRegion* hr, ShenandoahCollectionSet* collection_set) {
    if (!hr->is_humongous() && hr->has_live() && !collection_set->contains(hr)) {
      collection_set->add_region_check_for_duplicates(hr);
      hr->set_in_collection_set(true);
      return true;
    }
    return false;
  }

  // NOTE(review): 'connections' is dereferenced unconditionally below — this
  // heuristics relies on the caller always supplying a non-NULL matrix.
  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
    ShenandoahHeapRegionSet* regions = ShenandoahHeap::heap()->regions();
    size_t end = regions->active_regions();
    // NOTE(review): variable-length array — a compiler extension, not
    // standard C++; large heaps risk stack overflow here.
    RegionGarbage sorted_by_garbage[end];
    for (size_t i = 0; i < end; i++) {
      ShenandoahHeapRegion* r = regions->get_fast(i);
      sorted_by_garbage[i].region_number = r->region_number();
      sorted_by_garbage[i].garbage = r->garbage();
    }

    QuickSort::sort<RegionGarbage>(sorted_by_garbage, end, compare_by_garbage, false);

    int num = ShenandoahHeap::heap()->num_regions();
    // simulate write heuristics by picking best region.
    int r = 0;
    ShenandoahHeapRegion* choosenOne = regions->get(sorted_by_garbage[0].region_number);

    // Seed the collection set with the most garbage-rich eligible region.
    while (! maybe_add_heap_region(choosenOne, collection_set)) {
      choosenOne = regions->get(sorted_by_garbage[++r].region_number);
    }

    int region_number = choosenOne->region_number();
    log_develop_trace(gc)("Adding choosen region %d\n", region_number);

    // Add all the regions which point to this region.
    // NOTE(review): 'i' iterates matrix rows (region numbers), yet it is used
    // to index sorted_by_garbage, which is ordered by garbage — these look
    // like mismatched index spaces; confirm the intended mapping.
    for (int i = 0; i < num; i++) {
      if (connections[i * num + region_number] > 0) {
        ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
        if (maybe_add_heap_region(candidate, collection_set))
          log_develop_trace(gc)("Adding region %d which points to the choosen region\n", i);
      }
    }

    // Add all the regions they point to.
    // NOTE(review): same sorted-vs-region-number index concern as above.
    for (size_t ci = 0; ci < collection_set->active_regions(); ci++) {
      ShenandoahHeapRegion* cs_heap_region = collection_set->get(ci);
      int cs_heap_region_number = cs_heap_region->region_number();
      for (int i = 0; i < num; i++) {
        if (connections[i * num + cs_heap_region_number] > 0) {
          ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
          if (maybe_add_heap_region(candidate, collection_set)) {
            log_develop_trace(gc)
              ("Adding region %d which is pointed to by region %d\n", i, cs_heap_region_number);
          }
        }
      }
    }
    // Track the largest live-data volume ever selected.
    _max_live_data = MAX2(_max_live_data, collection_set->live_data());
    collection_set->print();
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    // Unused: this heuristics overrides choose_collection_set() wholesale.
    assert(false, "Shouldn't get here");
    return false;
  }
};
 659 class PartialHeuristics : public AdaptiveHeuristics {
 660 public:
 661   PartialHeuristics() : AdaptiveHeuristics() {
 662     if (FLAG_IS_DEFAULT(ShenandoahAllocationThreshold)) {
 663       FLAG_SET_DEFAULT(ShenandoahAllocationThreshold, 5);
 664     }
 665     FLAG_SET_DEFAULT(UseShenandoahMatrix, true);
 666     // TODO: Disable this optimization for now, as it also requires the matrix barriers.
 667     FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
 668   }
 669 
 670   virtual ~PartialHeuristics() {}
 671 
 672   bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 673     // Never do concurrent GCs.
 674     return false;
 675   }
 676 
 677   bool should_start_partial_gc() {
 678     ShenandoahHeap* heap = ShenandoahHeap::heap();
 679     size_t capacity = heap->capacity();
 680 
 681     size_t used = heap->used();
 682     return (used - _bytes_allocated_after_last_gc) * 100 / capacity > ShenandoahAllocationThreshold;
 683   }
 684 };
 685 
 686 ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
 687   _cycle_counter(0),
 688   _successful_cm(0),
 689   _degenerated_cm(0)
 690 {
 691 
 692   ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());
 693 
 694   initialize_all();
 695 
 696   _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
 697   _stw_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
 698   _conc_timer = new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer();
 699   _user_requested_gcs = 0;
 700   _allocation_failure_gcs = 0;
 701   _conc_gc_aborted = false;
 702 
 703   _phase_names[total_pause]                     = "Total Pauses (net)";
 704   _phase_names[total_pause_gross]               = "Total Pauses (gross)";
 705   _phase_names[init_mark]                       = "Initial Mark Pauses (net)";
 706   _phase_names[init_mark_gross]                 = "Initial Mark Pauses (gross)";
 707   _phase_names[final_mark]                      = "Final Mark Pauses (net)";
 708   _phase_names[final_mark_gross]                = "Final Mark Pauses (gross)";
 709   _phase_names[accumulate_stats]                = "  Accumulate Stats";
 710   _phase_names[make_parsable]                   = "  Make Parsable";
 711   _phase_names[clear_liveness]                  = "  Clear Liveness";
 712   _phase_names[scan_roots]                      = "  Scan Roots";
 713   _phase_names[update_roots]                    = "  Update Roots";
 714   _phase_names[drain_satb]                      = "  Drain SATB";
 715   _phase_names[weakrefs]                        = "  Weak References";
 716   _phase_names[class_unloading]                 = "  Class Unloading";
 717   _phase_names[prepare_evac]                    = "  Prepare Evacuation";
 718   _phase_names[init_evac]                       = "  Initial Evacuation";
 719 
 720   _phase_names[scan_thread_roots]               = "    S: Thread Roots";
 721   _phase_names[scan_code_roots]                 = "    S: Code Cache Roots";
 722   _phase_names[scan_string_table_roots]         = "    S: String Table Roots";
 723   _phase_names[scan_universe_roots]             = "    S: Universe Roots";
 724   _phase_names[scan_jni_roots]                  = "    S: JNI Roots";
 725   _phase_names[scan_jni_weak_roots]             = "    S: JNI Weak Roots";
 726   _phase_names[scan_synchronizer_roots]         = "    S: Synchronizer Roots";
 727   _phase_names[scan_flat_profiler_roots]        = "    S: Flat Profiler Roots";
 728   _phase_names[scan_management_roots]           = "    S: Management Roots";
 729   _phase_names[scan_system_dictionary_roots]    = "    S: System Dict Roots";
 730   _phase_names[scan_cldg_roots]                 = "    S: CLDG Roots";
 731   _phase_names[scan_jvmti_roots]                = "    S: JVMTI Roots";
 732 
 733   _phase_names[update_thread_roots]             = "    U: Thread Roots";
 734   _phase_names[update_code_roots]               = "    U: Code Cache Roots";
 735   _phase_names[update_string_table_roots]       = "    U: String Table Roots";
 736   _phase_names[update_universe_roots]           = "    U: Universe Roots";
 737   _phase_names[update_jni_roots]                = "    U: JNI Roots";
 738   _phase_names[update_jni_weak_roots]           = "    U: JNI Weak Roots";
 739   _phase_names[update_synchronizer_roots]       = "    U: Synchronizer Roots";
 740   _phase_names[update_flat_profiler_roots]      = "    U: Flat Profiler Roots";
 741   _phase_names[update_management_roots]         = "    U: Management Roots";
 742   _phase_names[update_system_dictionary_roots]  = "    U: System Dict Roots";
 743   _phase_names[update_cldg_roots]               = "    U: CLDG Roots";
 744   _phase_names[update_jvmti_roots]              = "    U: JVMTI Roots";
 745 
 746   _phase_names[evac_thread_roots]               = "    E: Thread Roots";
 747   _phase_names[evac_code_roots]                 = "    E: Code Cache Roots";
 748   _phase_names[evac_string_table_roots]         = "    E: String Table Roots";
 749   _phase_names[evac_universe_roots]             = "    E: Universe Roots";
 750   _phase_names[evac_jni_roots]                  = "    E: JNI Roots";
 751   _phase_names[evac_jni_weak_roots]             = "    E: JNI Weak Roots";
 752   _phase_names[evac_synchronizer_roots]         = "    E: Synchronizer Roots";
 753   _phase_names[evac_flat_profiler_roots]        = "    E: Flat Profiler Roots";
 754   _phase_names[evac_management_roots]           = "    E: Management Roots";
 755   _phase_names[evac_system_dictionary_roots]    = "    E: System Dict Roots";
 756   _phase_names[evac_cldg_roots]                 = "    E: CLDG Roots";
 757   _phase_names[evac_jvmti_roots]                = "    E: JVMTI Roots";
 758 
 759   _phase_names[recycle_regions]                 = "  Recycle regions";
 760   _phase_names[reset_bitmaps]                   = "Reset Bitmaps";
 761   _phase_names[resize_tlabs]                    = "Resize TLABs";
 762 
 763   _phase_names[full_gc]                         = "Full GC";
 764   _phase_names[full_gc_heapdumps]               = "  Heap Dumps";
 765   _phase_names[full_gc_prepare]                 = "  Prepare";
 766   _phase_names[full_gc_mark]                    = "  Mark";
 767   _phase_names[full_gc_mark_drain_queues]       = "    Drain Queues";
 768   _phase_names[full_gc_mark_weakrefs]           = "    Weak References";
 769   _phase_names[full_gc_mark_class_unloading]    = "    Class Unloading";
 770   _phase_names[full_gc_calculate_addresses]     = "  Calculate Addresses";
 771   _phase_names[full_gc_adjust_pointers]         = "  Adjust Pointers";
 772   _phase_names[full_gc_copy_objects]            = "  Copy Objects";
 773 
 774   _phase_names[conc_mark]                       = "Concurrent Marking";
 775   _phase_names[conc_evac]                       = "Concurrent Evacuation";
 776 
 777   if (ShenandoahGCHeuristics != NULL) {
 778     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
 779       log_info(gc, init)("Shenandoah heuristics: aggressive");
 780       _heuristics = new AggressiveHeuristics();
 781     } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
 782       log_info(gc, init)("Shenandoah heuristics: dynamic");
 783       _heuristics = new DynamicHeuristics();
 784     } else if (strcmp(ShenandoahGCHeuristics, "global") == 0) {
 785       log_info(gc, init)("Shenandoah heuristics: global");
 786       _heuristics = new GlobalHeuristics();
 787     } else if (strcmp(ShenandoahGCHeuristics, "ratio") == 0) {
 788       log_info(gc, init)("Shenandoah heuristics: ratio");
 789       _heuristics = new RatioHeuristics();
 790     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 791       log_info(gc, init)("Shenandoah heuristics: adaptive");
 792       _heuristics = new AdaptiveHeuristics();
 793     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 794       log_info(gc, init)("Shenandoah heuristics: passive");
 795       _heuristics = new PassiveHeuristics();
 796     } else if (strcmp(ShenandoahGCHeuristics, "connections") == 0) {
 797       log_info(gc, init)("Shenandoah heuristics: connections");
 798       _heuristics = new ConnectionHeuristics();
 799     } else if (strcmp(ShenandoahGCHeuristics, "partial") == 0) {
 800       log_info(gc, init)("Shenandoah heuristics: partial GC");
 801       _heuristics = new PartialHeuristics();
 802     } else {
 803       vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
 804     }
 805     _heuristics->print_thresholds();
 806   } else {
 807       ShouldNotReachHere();
 808   }
 809   _phase_times = new ShenandoahPhaseTimes(MAX2(ConcGCThreads, ParallelGCThreads));
 810 }
 811 
// Expose this policy through the shared CollectorPolicy casting interface.
ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
  return this;
}
 815 
// Shenandoah uses its own barrier set (read/write barriers for concurrent GC).
BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
  return BarrierSet::ShenandoahBarrierSet;
}
 819 
// Intentionally unimplemented: Shenandoah does not route allocations through
// the policy object. The guarantee fires if this path is ever taken.
HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
                                                       bool is_tlab,
                                                       bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
 826 
// Intentionally unimplemented: allocation-failure handling does not go through
// the policy object in Shenandoah. The guarantee fires if this is ever called.
HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
 831 
// Align both the heap and its spaces to the region size, so that address
// arithmetic can map any address to its region.
void ShenandoahCollectorPolicy::initialize_alignments() {

  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
  _space_alignment = ShenandoahHeapRegion::RegionSizeBytes;
  _heap_alignment = ShenandoahHeapRegion::RegionSizeBytes;
}
 838 
// Hook called after heap initialization completes; intentionally empty.
void ShenandoahCollectorPolicy::post_heap_initialize() {
  // Nothing to do here (yet).
}
 842 
// Forward allocation accounting to the active heuristics.
void ShenandoahCollectorPolicy::record_bytes_allocated(size_t bytes) {
  _heuristics->record_bytes_allocated(bytes);
}
 846 
// Forward the byte count at the start of concurrent marking to the heuristics.
void ShenandoahCollectorPolicy::record_bytes_start_CM(size_t bytes) {
  _heuristics->record_bytes_start_CM(bytes);
}
 850 
// Forward the byte count at the end of concurrent marking to the heuristics.
void ShenandoahCollectorPolicy::record_bytes_end_CM(size_t bytes) {
  _heuristics->record_bytes_end_CM(bytes);
}
 854 
// Forward reclamation accounting to the active heuristics.
void ShenandoahCollectorPolicy::record_bytes_reclaimed(size_t bytes) {
  _heuristics->record_bytes_reclaimed(bytes);
}
 858 
// Count an explicit (user-requested) GC; reported in print_tracing_info().
void ShenandoahCollectorPolicy::record_user_requested_gc() {
  _user_requested_gcs++;
}
 862 
// Count a GC triggered by allocation failure; reported in print_tracing_info().
void ShenandoahCollectorPolicy::record_allocation_failure_gc() {
  _allocation_failure_gcs++;
}
 866 
// Ask the active heuristics whether a concurrent mark cycle should start,
// given current heap usage and capacity.
bool ShenandoahCollectorPolicy::should_start_concurrent_mark(size_t used,
                                                             size_t capacity) {
  return _heuristics->should_start_concurrent_mark(used, capacity);
}
 871 
// Delegate to the heuristics the decision about what to do with cancelled marking.
bool ShenandoahCollectorPolicy::handover_cancelled_marking() {
  return _heuristics->handover_cancelled_marking();
}
 875 
// Record a successfully completed concurrent marking: notify the heuristics
// and bump the success counter used by print_tracing_info().
void ShenandoahCollectorPolicy::record_cm_success() {
  _heuristics->record_cm_success();
  _successful_cm++;
}
 880 
// Count a concurrent marking that degenerated (finished non-concurrently).
// NOTE(review): unlike record_cm_success(), this does not notify _heuristics —
// presumably intentional, but confirm the heuristics need no degeneration hook.
void ShenandoahCollectorPolicy::record_cm_degenerated() {
  _degenerated_cm++;
}
 884 
// Notify the heuristics that concurrent marking was cancelled.
void ShenandoahCollectorPolicy::record_cm_cancelled() {
  _heuristics->record_cm_cancelled();
}
 888 
// Notify the heuristics that a full GC occurred.
void ShenandoahCollectorPolicy::record_full_gc() {
  _heuristics->record_full_gc();
}
 892 
// Let the active heuristics select the regions for the collection set.
void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
  _heuristics->choose_collection_set(collection_set, connections);
}
 896 
// Let the active heuristics populate the free set.
void ShenandoahCollectorPolicy::choose_free_set(ShenandoahFreeSet* free_set) {
   _heuristics->choose_free_set(free_set);
}
 900 
 901 
// Ask the heuristics whether (weak) reference processing should run this cycle.
bool ShenandoahCollectorPolicy::process_references() {
  return _heuristics->process_references();
}
 905 
// Ask the heuristics whether class unloading should run this cycle.
bool ShenandoahCollectorPolicy::unload_classes() {
  return _heuristics->unload_classes();
}
 909 
 910 void ShenandoahCollectorPolicy::print_tracing_info(outputStream* out) {
 911   out->cr();
 912   out->print_cr("GC STATISTICS:");
 913   out->print_cr("  \"gross\" pauses include time to safepoint. \"net\" pauses are times spent in GC.");
 914   out->print_cr("  \"a\" is average time for each phase, look at levels to see if average makes sense.");
 915   out->print_cr("  \"lvls\" are quantiles: 0%% (minimum), 25%%, 50%% (median), 75%%, 100%% (maximum).");
 916   out->cr();
 917 
 918   for (uint i = 0; i < _num_phases; i++) {
 919     if (_timing_data[i]._secs.maximum() != 0) {
 920       print_summary_sd(out, _phase_names[i], &(_timing_data[i]._secs));
 921     }
 922   }
 923 
 924   out->cr();
 925   out->print_cr("" SIZE_FORMAT " allocation failure and " SIZE_FORMAT " user requested GCs", _allocation_failure_gcs, _user_requested_gcs);
 926   out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated concurrent markings", _successful_cm, _degenerated_cm);
 927   out->cr();
 928 }
 929 
// Print one summary line for a single phase: total time in seconds, average
// and quantiles in microseconds, and the number of samples.
void ShenandoahCollectorPolicy::print_summary_sd(outputStream* out, const char* str, const HdrSeq* seq)  {
  out->print_cr("%-27s = %8.2lf s (a = %8.0lf us) (n = "INT32_FORMAT_W(5)") (lvls, us = %8.0lf, %8.0lf, %8.0lf, %8.0lf, %8.0lf)",
          str,
          seq->sum(),                       // total, seconds
          seq->avg() * 1000000.0,           // average, converted to us
          seq->num(),                       // sample count
          seq->percentile(0)  * 1000000.0,  // minimum
          seq->percentile(25) * 1000000.0,
          seq->percentile(50) * 1000000.0,  // median
          seq->percentile(75) * 1000000.0,
          seq->maximum() * 1000000.0        // maximum
  );
}
 943 
// Advance the GC cycle counter.
void ShenandoahCollectorPolicy::increase_cycle_counter() {
  _cycle_counter++;
}
 947 
// Current GC cycle count.
size_t ShenandoahCollectorPolicy::cycle_counter() const {
  return _cycle_counter;
}
 951 
 952  ShenandoahPhaseTimes* ShenandoahCollectorPolicy::phase_times() {
 953   return _phase_times;
 954 }
 955 
 956 
// Worker-count hint derived from the number of mutator (Java) threads,
// scaled by the ShenandoahGCWorkerPerJavaThread ratio.
uint ShenandoahCollectorPolicy::calc_workers_for_java_threads(uint application_workers) {
  return (uint)ShenandoahGCWorkerPerJavaThread * application_workers;
}
 960 
// Worker-count hint derived from the live data size: one worker per
// HeapSizePerGCThread bytes. May be 0 for small live sets; callers clamp
// the result (see calc_default_active_workers).
uint ShenandoahCollectorPolicy::calc_workers_for_live_set(size_t live_data) {
  return (uint)(live_data / HeapSizePerGCThread);
}
 964 
 965 
 966 uint ShenandoahCollectorPolicy::calc_default_active_workers(
 967                                                      uint total_workers,
 968                                                      uint min_workers,
 969                                                      uint active_workers,
 970                                                      uint application_workers,
 971                                                      uint  workers_by_java_threads,
 972                                                      uint  workers_by_liveset) {
 973   // If the user has turned off using a dynamic number of GC threads
 974   // or the users has requested a specific number, set the active
 975   // number of workers to all the workers.
 976   uint new_active_workers = total_workers;
 977   uint prev_active_workers = active_workers;
 978   uint active_workers_by_JT = 0;
 979   uint active_workers_by_liveset = 0;
 980 
 981   active_workers_by_JT = MAX2(workers_by_java_threads, min_workers);
 982 
 983   // Choose a number of GC threads based on the live set.
 984   active_workers_by_liveset =
 985       MAX2((uint) 2U, workers_by_liveset);
 986 
 987   uint max_active_workers =
 988     MAX2(active_workers_by_JT, active_workers_by_liveset);
 989 
 990   new_active_workers = MIN2(max_active_workers, total_workers);
 991 
 992   // Increase GC workers instantly but decrease them more
 993   // slowly.
 994   if (new_active_workers < prev_active_workers) {
 995     new_active_workers =
 996       MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
 997   }
 998 
 999   if (UseNUMA) {
1000     uint numa_groups = (uint)os::numa_get_groups_num();
1001     assert(numa_groups <= total_workers, "Not enough workers to cover all numa groups");
1002     new_active_workers = MAX2(new_active_workers, numa_groups);
1003   }
1004 
1005   // Check once more that the number of workers is within the limits.
1006   assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
1007   assert(new_active_workers >= min_workers, "Minimum workers not observed");
1008   assert(new_active_workers <= total_workers, "Total workers not observed");
1009 
1010   log_trace(gc, task)("ShenandoahCollectorPolicy::calc_default_active_workers() : "
1011      "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
1012      "prev_active_workers: " UINTX_FORMAT "\n"
1013      " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_liveset: " UINTX_FORMAT,
1014      (uintx)active_workers, (uintx)new_active_workers, (uintx)prev_active_workers,
1015      (uintx)active_workers_by_JT, (uintx)active_workers_by_liveset);
1016   assert(new_active_workers > 0, "Always need at least 1");
1017   return new_active_workers;
1018 }
1019 
1020 /**
1021  * Initial marking phase also update references of live objects from previous concurrent GC cycle,
1022  * so we take Java threads and live set into account.
1023  */
1024 uint ShenandoahCollectorPolicy::calc_workers_for_init_marking(uint active_workers,
1025                                             uint application_workers) {
1026 
1027   if (!UseDynamicNumberOfGCThreads ||
1028      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1029     assert(ParallelGCThreads > 0, "Always need at least 1");
1030     return ParallelGCThreads;
1031   } else {
1032     ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)ShenandoahHeap::heap()->collector_policy();
1033     size_t live_data = policy->_heuristics->bytes_in_cset();
1034 
1035     return calc_default_active_workers(ParallelGCThreads, (ParallelGCThreads > 1) ? 2 : 1,
1036       active_workers, application_workers,
1037       calc_workers_for_java_threads(application_workers),
1038       calc_workers_for_live_set(live_data));
1039   }
1040 }
1041 
1042 uint ShenandoahCollectorPolicy::calc_workers_for_conc_marking(uint active_workers,
1043                                             uint application_workers) {
1044 
1045   if (!UseDynamicNumberOfGCThreads ||
1046      (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1047     assert(ConcGCThreads > 0, "Always need at least 1");
1048     return ConcGCThreads;
1049   } else {
1050     return calc_default_active_workers(ConcGCThreads,
1051       (ConcGCThreads > 1 ? 2 : 1), active_workers,
1052       application_workers, calc_workers_for_java_threads(application_workers), 0);
1053   }
1054 }
1055 
1056 uint ShenandoahCollectorPolicy::calc_workers_for_final_marking(uint active_workers,
1057                                             uint application_workers) {
1058 
1059   if (!UseDynamicNumberOfGCThreads ||
1060      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1061     assert(ParallelGCThreads > 0, "Always need at least 1");
1062     return ParallelGCThreads;
1063   } else {
1064     return calc_default_active_workers(ParallelGCThreads,
1065       (ParallelGCThreads > 1 ? 2 : 1), active_workers,
1066       application_workers, calc_workers_for_java_threads(application_workers), 0);
1067   }
1068 }
1069 
1070   // Calculate workers for concurrent evacuation (concurrent GC)
1071 uint ShenandoahCollectorPolicy::calc_workers_for_conc_evacuation(uint active_workers,
1072                                             uint application_workers) {
1073   if (!UseDynamicNumberOfGCThreads ||
1074      (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1075     assert(ConcGCThreads > 0, "Always need at least 1");
1076     return ConcGCThreads;
1077   } else {
1078     return calc_workers_for_evacuation(false, // not a full GC
1079       ConcGCThreads, active_workers, application_workers);
1080   }
1081 }
1082 
1083   // Calculate workers for parallel evaculation (full GC)
1084 uint ShenandoahCollectorPolicy::calc_workers_for_parallel_evacuation(uint active_workers,
1085                                             uint application_workers) {
1086   if (!UseDynamicNumberOfGCThreads ||
1087      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1088     assert(ParallelGCThreads > 0, "Always need at least 1");
1089     return ParallelGCThreads;
1090   } else {
1091     return calc_workers_for_evacuation(true, // a full GC
1092       ParallelGCThreads, active_workers, application_workers);
1093   }
1094 }
1095 
1096 
1097 uint ShenandoahCollectorPolicy::calc_workers_for_evacuation(bool full_gc,
1098                                             uint total_workers,
1099                                             uint active_workers,
1100                                             uint application_workers) {
1101 
1102   // Calculation based on live set
1103   size_t live_data = 0;
1104   ShenandoahHeap* heap = ShenandoahHeap::heap();
1105   if (full_gc) {
1106     ShenandoahHeapRegionSet* regions = heap->regions();
1107     for (size_t index = 0; index < regions->active_regions(); index ++) {
1108       live_data += regions->get_fast(index)->get_live_data_bytes();
1109     }
1110   } else {
1111     ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)heap->collector_policy();
1112     live_data = policy->_heuristics->bytes_in_cset();
1113   }
1114 
1115   uint active_workers_by_liveset = calc_workers_for_live_set(live_data);
1116   return calc_default_active_workers(total_workers,
1117       (total_workers > 1 ? 2 : 1), active_workers,
1118       application_workers, 0, active_workers_by_liveset);
1119 }
1120 
// Ask the active heuristics whether a partial GC cycle should start.
bool ShenandoahCollectorPolicy::should_start_partial_gc() {
  return _heuristics->should_start_partial_gc();
}