1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/gcPolicyCounters.hpp"
  26 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  27 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  28 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  29 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  30 #include "gc/shenandoah/shenandoahPartialGC.hpp"
  31 #include "gc/shenandoah/shenandoahPhaseTimes.hpp"
  32 #include "runtime/os.hpp"
  33 
  34 class ShenandoahHeuristics : public CHeapObj<mtGC> {
  35 
  36   NumberSeq _allocation_rate_bytes;
  37   NumberSeq _reclamation_rate_bytes;
  38 
  39   size_t _bytes_allocated_since_CM;
  40   size_t _bytes_reclaimed_this_cycle;
  41 
  42 protected:
  43   typedef struct {
  44     size_t region_number;
  45     size_t garbage;
  46   } RegionGarbage;
  47 
  48   static int compare_by_garbage(RegionGarbage a, RegionGarbage b) {
  49     if (a.garbage > b.garbage)
  50       return -1;
  51     else if (a.garbage < b.garbage)
  52       return 1;
  53     else return 0;
  54   }
  55 
  56   RegionGarbage* get_region_garbage_cache(size_t num) {
  57     RegionGarbage* res = _region_garbage;
  58     if (res == NULL) {
  59       res = NEW_C_HEAP_ARRAY(RegionGarbage, num, mtGC);
  60       _region_garbage_size = num;
  61     } else if (_region_garbage_size < num) {
  62       REALLOC_C_HEAP_ARRAY(RegionGarbage, _region_garbage, num, mtGC);
  63       _region_garbage_size = num;
  64     }
  65     return res;
  66   }
  67 
  68   RegionGarbage* _region_garbage;
  69   size_t _region_garbage_size;
  70 
  71   size_t _bytes_allocated_start_CM;
  72   size_t _bytes_allocated_during_CM;
  73 
  74   size_t _bytes_allocated_after_last_gc;
  75 
  76   uint _cancelled_cm_cycles_in_a_row;
  77   uint _successful_cm_cycles_in_a_row;
  78 
  79   size_t _bytes_in_cset;
  80 
  81 public:
  82 
  83   ShenandoahHeuristics();
  84   ~ShenandoahHeuristics();
  85 
  86   void record_bytes_allocated(size_t bytes);
  87   void record_bytes_reclaimed(size_t bytes);
  88   void record_bytes_start_CM(size_t bytes);
  89   void record_bytes_end_CM(size_t bytes);
  90 
  91   void record_gc_start() {
  92     // Do nothing.
  93   }
  94 
  95   void record_gc_end() {
  96     _bytes_allocated_after_last_gc = ShenandoahHeap::heap()->used();
  97   }
  98 
  99   size_t bytes_in_cset() const { return _bytes_in_cset; }
 100 
 101   virtual void print_thresholds() {
 102   }
 103 
 104   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0;
 105 
 106   virtual bool update_refs_early() const {
 107     return ShenandoahUpdateRefsEarly;
 108   }
 109 
 110   virtual bool should_start_partial_gc() {
 111     return false;
 112   }
 113 
 114   virtual bool handover_cancelled_marking() {
 115     return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold;
 116   }
 117 
 118   virtual void record_cm_cancelled() {
 119     _cancelled_cm_cycles_in_a_row++;
 120     _successful_cm_cycles_in_a_row = 0;
 121   }
 122 
 123   virtual void record_cm_success() {
 124     _cancelled_cm_cycles_in_a_row = 0;
 125     _successful_cm_cycles_in_a_row++;
 126   }
 127 
 128   virtual void record_full_gc() {
 129     _bytes_in_cset = 0;
 130   }
 131 
 132   virtual void start_choose_collection_set() {
 133   }
 134   virtual void end_choose_collection_set() {
 135   }
 136   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) = 0;
 137 
 138   virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections=NULL);
 139   virtual void choose_free_set(ShenandoahFreeSet* free_set);
 140 
 141   virtual bool process_references() {
 142     if (ShenandoahRefProcFrequency == 0) return false;
 143     size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
 144     // Process references every Nth GC cycle.
 145     return cycle % ShenandoahRefProcFrequency == 0;
 146   }
 147 
 148   virtual bool unload_classes() {
 149     if (ShenandoahUnloadClassesFrequency == 0) return false;
 150     size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
 151     // Unload classes every Nth GC cycle.
 152     // This should not happen in the same cycle as process_references to amortize costs.
 153     // Offsetting by one is enough to break the rendezvous when periods are equal.
 154     // When periods are not equal, offsetting by one is just as good as any other guess.
 155     return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
 156   }
 157 
 158   virtual bool needs_regions_sorted_by_garbage() {
 159     // Most of them do not.
 160     return false;
 161   }
 162 };
 163 
 164 ShenandoahHeuristics::ShenandoahHeuristics() :
 165   _bytes_allocated_since_CM(0),
 166   _bytes_reclaimed_this_cycle(0),
 167   _bytes_allocated_start_CM(0),
 168   _bytes_allocated_during_CM(0),
 169   _bytes_allocated_after_last_gc(0),
 170   _bytes_in_cset(0),
 171   _cancelled_cm_cycles_in_a_row(0),
 172   _successful_cm_cycles_in_a_row(0),
 173   _region_garbage(NULL),
 174   _region_garbage_size(0)
 175 {
 176 }
 177 
 178 ShenandoahHeuristics::~ShenandoahHeuristics() {
 179   if (_region_garbage != NULL) {
 180     FREE_C_HEAP_ARRAY(RegionGarbage, _region_garbage);
 181   }
 182 }
 183 
// Builds the collection set for the upcoming evacuation. Regions that have no
// live data are recycled on the spot; the rest are offered to the subclass
// predicate region_in_collection_set(). 'connections' is unused here; it is
// consumed by the ConnectionHeuristics override.
void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
  start_choose_collection_set();

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Poll this before populating collection set: the immediate-reclaim pass
  // below mutates heap accounting.
  size_t total_garbage = heap->garbage();

  // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.

  ShenandoahHeapRegionSet* regions = heap->regions();
  size_t active = regions->active_regions();

  RegionGarbage* candidates = get_region_garbage_cache(active);

  size_t cand_idx = 0;
  _bytes_in_cset = 0;

  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  for (size_t i = 0; i < active; i++) {
    ShenandoahHeapRegion* region = regions->get(i);

    // Humongous and pinned regions are never candidates for evacuation.
    if (! region->is_humongous() && ! region->is_pinned()) {
      if ((! region->is_empty()) && ! region->has_live()) {
        // We can recycle it right away and put it in the free set.
        immediate_regions++;
        immediate_garbage += region->garbage();
        heap->decrease_used(region->used());
        region->recycle();
        log_develop_trace(gc)("Choose region " SIZE_FORMAT " for immediate reclaim with garbage = " SIZE_FORMAT
                              " and live = " SIZE_FORMAT "\n",
                              region->region_number(), region->garbage(), region->get_live_data_bytes());
      } else {
        // This is our candidate for later consideration.
        candidates[cand_idx].region_number = region->region_number();
        candidates[cand_idx].garbage = region->garbage();
        cand_idx++;
      }
    } else {
      assert(region->has_live() || region->is_empty() || region->is_pinned() || region->is_humongous(), "check rejected");
      log_develop_trace(gc)("Rejected region " SIZE_FORMAT " with garbage = " SIZE_FORMAT
                            " and live = " SIZE_FORMAT "\n",
                            region->region_number(), region->garbage(), region->get_live_data_bytes());
    }
  }

  // Step 2. Process the remaining candidates, if any.

  if (cand_idx > 0) {
    if (needs_regions_sorted_by_garbage()) {
      // Greedy heuristics (Global/Ratio) want garbage-richest regions first.
      QuickSort::sort<RegionGarbage>(candidates, (int)cand_idx, compare_by_garbage, false);
    }

    for (size_t i = 0; i < cand_idx; i++) {
      ShenandoahHeapRegion *region = regions->get_fast(candidates[i].region_number);
      if (region_in_collection_set(region, immediate_garbage)) {
        log_develop_trace(gc)("Choose region " SIZE_FORMAT " with garbage = " SIZE_FORMAT
                                      " and live = " SIZE_FORMAT "\n",
                              region->region_number(), region->garbage(), region->get_live_data_bytes());
        collection_set->add_region(region);
        region->set_in_collection_set(true);
        _bytes_in_cset += region->used();
      }
    }
  }

  end_choose_collection_set();

  log_info(gc, ergo)("Total Garbage: "SIZE_FORMAT"M",
                     total_garbage / M);
  log_info(gc, ergo)("Immediate Garbage: "SIZE_FORMAT"M, "SIZE_FORMAT" regions",
                     immediate_garbage / M, immediate_regions);
  log_info(gc, ergo)("Garbage to be collected: "SIZE_FORMAT"M ("SIZE_FORMAT"%% of total), "SIZE_FORMAT" regions",
                     collection_set->garbage() / M, collection_set->garbage() * 100 / MAX2(total_garbage, (size_t)1), collection_set->count());
  log_info(gc, ergo)("Live objects to be evacuated: "SIZE_FORMAT"M",
                     collection_set->live_data() / M);
  log_info(gc, ergo)("Live/garbage ratio in collected regions: "SIZE_FORMAT"%%",
                     collection_set->live_data() * 100 / MAX2(collection_set->garbage(), (size_t)1));
}
 264 
 265 void ShenandoahHeuristics::choose_free_set(ShenandoahFreeSet* free_set) {
 266 
 267   ShenandoahHeapRegionSet* ordered_regions = ShenandoahHeap::heap()->regions();
 268   size_t i = 0;
 269   size_t end = ordered_regions->active_regions();
 270 
 271   ShenandoahHeap* heap = ShenandoahHeap::heap();
 272   while (i < end) {
 273     ShenandoahHeapRegion* region = ordered_regions->get(i++);
 274     if ((! heap->in_collection_set(region))
 275         && (! region->is_humongous())
 276         && (! region->is_pinned())) {
 277       free_set->add_region(region);
 278     }
 279   }
 280 }
 281 
 282 void ShenandoahCollectorPolicy::record_workers_start(TimingPhase phase) {
 283   for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
 284     _phase_times->reset(i);
 285   }
 286 }
 287 
 288 void ShenandoahCollectorPolicy::record_workers_end(TimingPhase phase) {
 289   guarantee(phase == init_evac ||
 290             phase == scan_roots ||
 291             phase == update_roots ||
 292             phase == partial_gc_work ||
 293             phase == final_update_refs_roots ||
 294             phase == _num_phases,
 295             "only in these phases we can add per-thread phase times");
 296   if (phase != _num_phases) {
 297     // Merge _phase_time to counters below the given phase.
 298     for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
 299       double t = _phase_times->average(i);
 300       _timing_data[phase + i + 1]._secs.add(t);
 301     }
 302   }
 303 }
 304 
 305 void ShenandoahCollectorPolicy::record_phase_start(TimingPhase phase) {
 306   _timing_data[phase]._start = os::elapsedTime();
 307 
 308 }
 309 
 310 void ShenandoahCollectorPolicy::record_phase_end(TimingPhase phase) {
 311   double end = os::elapsedTime();
 312   double elapsed = end - _timing_data[phase]._start;
 313   _timing_data[phase]._secs.add(elapsed);
 314 }
 315 
// Forwards the GC-start notification to the active heuristics.
void ShenandoahCollectorPolicy::record_gc_start() {
  _heuristics->record_gc_start();
}
 319 
// Forwards the GC-end notification to the active heuristics.
void ShenandoahCollectorPolicy::record_gc_end() {
  _heuristics->record_gc_end();
}
 323 
// Intentionally empty: cancellation accounting is handled elsewhere
// (see ShenandoahHeuristics::record_cm_cancelled).
void ShenandoahCollectorPolicy::report_concgc_cancelled() {
}
 326 
// Records the allocation volume and feeds the allocation-rate sequence.
// NOTE(review): both counters are overwritten rather than accumulated —
// presumably callers pass a running total; confirm against call sites.
void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
  _bytes_allocated_since_CM = bytes;
  _bytes_allocated_start_CM = bytes;
  _allocation_rate_bytes.add(bytes);
}
 332 
// Records this cycle's reclaimed volume and feeds the reclamation-rate sequence.
void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
  _bytes_reclaimed_this_cycle = bytes;
  _reclamation_rate_bytes.add(bytes);
}
 337 
// Snapshots the allocation counter at concurrent-mark start; used by
// record_bytes_end_CM() to compute allocations made during marking.
void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
  _bytes_allocated_start_CM = bytes;
}
 341 
 342 void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
 343   _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)
 344                                                                    : bytes;
 345 }
 346 
 347 class PassiveHeuristics : public ShenandoahHeuristics {
 348 public:
 349   PassiveHeuristics() : ShenandoahHeuristics() {
 350   }
 351 
 352   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 353     return r->garbage() > 0;
 354   }
 355 
 356   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 357     // Never do concurrent GCs.
 358     return false;
 359   }
 360 
 361   virtual bool process_references() {
 362     // Randomly process refs with 50% chance.
 363     return (os::random() & 1) == 1;
 364   }
 365 
 366   virtual bool unload_classes() {
 367     // Randomly unload classes with 50% chance.
 368     return (os::random() & 1) == 1;
 369   }
 370 };
 371 
 372 class AggressiveHeuristics : public ShenandoahHeuristics {
 373 public:
 374   AggressiveHeuristics() : ShenandoahHeuristics() {
 375   }
 376 
 377   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 378     return r->garbage() > 0;
 379   }
 380 
 381   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 382     return true;
 383   }
 384 
 385   virtual bool process_references() {
 386     // Randomly process refs with 50% chance.
 387     return (os::random() & 1) == 1;
 388   }
 389 
 390   virtual bool unload_classes() {
 391     // Randomly unload classes with 50% chance.
 392     return (os::random() & 1) == 1;
 393   }
 394 };
 395 
 396 class DynamicHeuristics : public ShenandoahHeuristics {
 397 public:
 398   DynamicHeuristics() : ShenandoahHeuristics() {
 399   }
 400 
 401   void print_thresholds() {
 402     log_info(gc, init)("Shenandoah heuristics thresholds: allocation "SIZE_FORMAT", free "SIZE_FORMAT", garbage "SIZE_FORMAT,
 403                        ShenandoahAllocationThreshold,
 404                        ShenandoahFreeThreshold,
 405                        ShenandoahGarbageThreshold);
 406   }
 407 
 408   virtual ~DynamicHeuristics() {}
 409 
 410   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 411 
 412     bool shouldStartConcurrentMark = false;
 413 
 414     ShenandoahHeap* heap = ShenandoahHeap::heap();
 415     size_t free_capacity = heap->free_regions()->capacity();
 416     size_t free_used = heap->free_regions()->used();
 417     assert(free_used <= free_capacity, "must use less than capacity");
 418     size_t available =  free_capacity - free_used;
 419 
 420     if (!update_refs_early()) {
 421       // Count in the memory available after cset reclamation.
 422       size_t cset = MIN2(_bytes_in_cset, (ShenandoahCSetThreshold * capacity) / 100);
 423       available += cset;
 424     }
 425 
 426     uintx threshold = ShenandoahFreeThreshold + ShenandoahCSetThreshold;
 427     size_t targetStartMarking = (capacity * threshold) / 100;
 428 
 429     size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
 430     if (available < targetStartMarking &&
 431         heap->bytes_allocated_since_cm() > threshold_bytes_allocated)
 432     {
 433       // Need to check that an appropriate number of regions have
 434       // been allocated since last concurrent mark too.
 435       shouldStartConcurrentMark = true;
 436     }
 437 
 438     return shouldStartConcurrentMark;
 439   }
 440 
 441   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 442     size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
 443     return r->garbage() > threshold;
 444   }
 445 
 446 };
 447 
 448 
// Adaptive heuristics: like Dynamic, but the free threshold moves up after
// cancelled cycles and back down after a run of successful ones, and the
// expected cset reclamation is predicted from a history of observed csets.
class AdaptiveHeuristics : public ShenandoahHeuristics {
private:
  // Current free threshold, in percent of capacity; adapts over time.
  uintx _free_threshold;
  // Decaying history of observed cset sizes (percent of capacity).
  TruncatedSeq* _cset_history;

public:
  AdaptiveHeuristics() :
    ShenandoahHeuristics(),
    _free_threshold(ShenandoahInitFreeThreshold),
    _cset_history(new TruncatedSeq((uint)ShenandoahHappyCyclesThreshold)) {

    // Seed the history so davg() has data before the first real sample.
    _cset_history->add((double) ShenandoahCSetThreshold);
    _cset_history->add((double) ShenandoahCSetThreshold);
  }

  virtual ~AdaptiveHeuristics() {
    delete _cset_history;
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    // Collect regions whose garbage exceeds the configured share of a region.
    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
    return r->garbage() > threshold;
  }

  virtual void record_cm_cancelled() {
    ShenandoahHeuristics::record_cm_cancelled();
    // A cancelled cycle means we started too late: be more conservative.
    if (_free_threshold < ShenandoahMaxFreeThreshold) {
      _free_threshold++;
      log_info(gc,ergo)("increasing free threshold to: "UINTX_FORMAT, _free_threshold);
    }
  }

  virtual void record_cm_success() {
    ShenandoahHeuristics::record_cm_success();
    // After enough back-to-back successes, try starting a little later.
    if (_successful_cm_cycles_in_a_row > ShenandoahHappyCyclesThreshold &&
        _free_threshold > ShenandoahMinFreeThreshold) {
      _free_threshold--;
      log_info(gc,ergo)("reducing free threshold to: "UINTX_FORMAT, _free_threshold);
      _successful_cm_cycles_in_a_row = 0;
    }
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    bool shouldStartConcurrentMark = false;

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t free_capacity = heap->free_regions()->capacity();
    size_t free_used = heap->free_regions()->used();
    assert(free_used <= free_capacity, "must use less than capacity");
    size_t available =  free_capacity - free_used;
    uintx factor = _free_threshold;
    size_t cset_threshold = 0;
    if (!update_refs_early()) {
      // Count in the memory available after cset reclamation, predicted
      // from the decaying average of past cset sizes.
      cset_threshold = (size_t) _cset_history->davg();
      size_t cset = MIN2(_bytes_in_cset, (cset_threshold * capacity) / 100);
      available += cset;
      factor += cset_threshold;
    }

    size_t targetStartMarking = (capacity * factor) / 100;

    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
    if (available < targetStartMarking &&
        heap->bytes_allocated_since_cm() > threshold_bytes_allocated)
    {
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      shouldStartConcurrentMark = true;
    }

    if (shouldStartConcurrentMark) {
      if (! update_refs_early()) {
        // Feed the actual cset share into the history for future predictions.
        log_info(gc,ergo)("predicted cset threshold: "SIZE_FORMAT, cset_threshold);
        log_info(gc,ergo)("Starting concurrent mark at "SIZE_FORMAT"K CSet ("SIZE_FORMAT"%%)", _bytes_in_cset / K, _bytes_in_cset * 100 / capacity);
        _cset_history->add((double) (_bytes_in_cset * 100 / capacity));
      }
    }
    return shouldStartConcurrentMark;
  }

};
 531 
 532 class GlobalHeuristics : public DynamicHeuristics {
 533 private:
 534   size_t _garbage;
 535   size_t _min_garbage;
 536 public:
 537   GlobalHeuristics() : DynamicHeuristics() {
 538     if (FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) {
 539       FLAG_SET_DEFAULT(ShenandoahGarbageThreshold, 90);
 540     }
 541   }
 542   virtual ~GlobalHeuristics() {}
 543 
 544   virtual void start_choose_collection_set() {
 545     _garbage = 0;
 546     size_t heap_garbage = ShenandoahHeap::heap()->garbage();
 547     _min_garbage =  heap_garbage * ShenandoahGarbageThreshold / 100;
 548   }
 549 
 550   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 551     if (_garbage + immediate_garbage < _min_garbage && ! r->is_empty()) {
 552       _garbage += r->garbage();
 553       return true;
 554     } else {
 555       return false;
 556     }
 557   }
 558 
 559   virtual bool needs_regions_sorted_by_garbage() {
 560     return true;
 561   }
 562 };
 563 
 564 class RatioHeuristics : public DynamicHeuristics {
 565 private:
 566   size_t _garbage;
 567   size_t _live;
 568 public:
 569   RatioHeuristics() : DynamicHeuristics() {
 570     if (FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) {
 571       FLAG_SET_DEFAULT(ShenandoahGarbageThreshold, 95);
 572     }
 573   }
 574   virtual ~RatioHeuristics() {}
 575 
 576   virtual void start_choose_collection_set() {
 577     _garbage = 0;
 578     _live = 0;
 579   }
 580 
 581   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 582     size_t min_ratio = 100 - ShenandoahGarbageThreshold;
 583     if (_live * 100 / MAX2(_garbage + immediate_garbage, (size_t)1) < min_ratio && ! r->is_empty()) {
 584       _garbage += r->garbage();
 585       _live += r->get_live_data_bytes();
 586       return true;
 587     } else {
 588       return false;
 589     }
 590   }
 591 
 592   virtual bool needs_regions_sorted_by_garbage() {
 593     return true;
 594   }
 595 };
 596 
// Connection heuristics: experimental policy that seeds the collection set
// with the garbage-richest region, then pulls in regions connected to it
// (both directions) via the inter-region connection matrix.
class ConnectionHeuristics : public ShenandoahHeuristics {
private:
  // Largest live-data volume observed in any chosen collection set so far.
  size_t _max_live_data;
  double _used_threshold_factor;
  double _garbage_threshold_factor;
  double _allocation_threshold_factor;

  uintx _used_threshold;
  uintx _garbage_threshold;
  uintx _allocation_threshold;

public:
  ConnectionHeuristics() : ShenandoahHeuristics() {
    _max_live_data = 0;

    _used_threshold = 0;
    _garbage_threshold = 0;
    _allocation_threshold = 0;

    _used_threshold_factor = 0.;
    _garbage_threshold_factor = 0.1;
    _allocation_threshold_factor = 0.;
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    // NOTE(review): variable is named half_gig but the value is 64M — confirm
    // which was intended.
    size_t half_gig = 64 * 1024 * 1024;
    size_t bytes_alloc = ShenandoahHeap::heap()->bytes_allocated_since_cm();
    bool result =  bytes_alloc > half_gig;
    if (result) tty->print("Starting a concurrent mark");
    return result;
  }

  // Adds 'hr' to the cset unless it is humongous, dead, or already present.
  // Returns true if the region was added.
  bool maybe_add_heap_region(ShenandoahHeapRegion* hr, ShenandoahCollectionSet* collection_set) {
    if (!hr->is_humongous() && hr->has_live() && !collection_set->contains(hr)) {
      collection_set->add_region_check_for_duplicates(hr);
      hr->set_in_collection_set(true);
      return true;
    }
    return false;
  }

  // NOTE(review): the base class declares connections with a NULL default,
  // and this override dereferences it unconditionally — callers of this
  // heuristics must always pass a valid matrix.
  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
    ShenandoahHeapRegionSet* regions = ShenandoahHeap::heap()->regions();
    size_t active = regions->active_regions();

    // Rank all active regions by garbage, richest first.
    RegionGarbage* sorted_by_garbage = get_region_garbage_cache(active);
    for (size_t i = 0; i < active; i++) {
      ShenandoahHeapRegion* r = regions->get_fast(i);
      sorted_by_garbage[i].region_number = r->region_number();
      sorted_by_garbage[i].garbage = r->garbage();
    }

    QuickSort::sort<RegionGarbage>(sorted_by_garbage, (int) active, compare_by_garbage, false);

    size_t num = ShenandoahHeap::heap()->num_regions();
    // simulate write heuristics by picking best region.
    // NOTE(review): this loop assumes at least one region is addable;
    // otherwise 'r' runs past the end of sorted_by_garbage.
    int r = 0;
    ShenandoahHeapRegion* choosenOne = regions->get(sorted_by_garbage[0].region_number);

    while (! maybe_add_heap_region(choosenOne, collection_set)) {
      choosenOne = regions->get(sorted_by_garbage[++r].region_number);
    }

    size_t region_number = choosenOne->region_number();
    log_develop_trace(gc)("Adding choosen region " SIZE_FORMAT, region_number);

    // Add all the regions which point to this region.
    // NOTE(review): the matrix row is addressed by the raw loop index 'i'
    // while the candidate is fetched through the sorted array — confirm the
    // matrix rows are meant to be keyed by sorted rank rather than region
    // number.
    for (size_t i = 0; i < num; i++) {
      if (connections[i * num + region_number] > 0) {
        ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
        if (maybe_add_heap_region(candidate, collection_set)) {
          log_develop_trace(gc)("Adding region " SIZE_FORMAT " which points to the choosen region", i);
        }
      }
    }

    // Add all the regions they point to.
    for (size_t ci = 0; ci < collection_set->active_regions(); ci++) {
      ShenandoahHeapRegion* cs_heap_region = collection_set->get(ci);
      size_t cs_heap_region_number = cs_heap_region->region_number();
      for (size_t i = 0; i < num; i++) {
        if (connections[i * num + cs_heap_region_number] > 0) {
          ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
          if (maybe_add_heap_region(candidate, collection_set)) {
            log_develop_trace(gc)
              ("Adding region " SIZE_FORMAT " which is pointed to by region " SIZE_FORMAT, i, cs_heap_region_number);
          }
        }
      }
    }
    _max_live_data = MAX2(_max_live_data, collection_set->live_data());
    collection_set->print();
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    // Unused: this heuristics overrides choose_collection_set() wholesale.
    assert(false, "Shouldn't get here");
    return false;
  }
};
 696 class PartialHeuristics : public AdaptiveHeuristics {
 697 public:
 698   PartialHeuristics() : AdaptiveHeuristics() {
 699     if (FLAG_IS_DEFAULT(ShenandoahAllocationThreshold)) {
 700       FLAG_SET_DEFAULT(ShenandoahAllocationThreshold, 5);
 701     }
 702     FLAG_SET_DEFAULT(UseShenandoahMatrix, true);
 703     // TODO: Disable this optimization for now, as it also requires the matrix barriers.
 704     FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
 705   }
 706 
 707   virtual ~PartialHeuristics() {}
 708 
 709   bool update_refs_early() const {
 710     return true;
 711   }
 712 
 713   bool should_start_partial_gc() {
 714     ShenandoahHeap* heap = ShenandoahHeap::heap();
 715     size_t capacity = heap->capacity();
 716 
 717     size_t used = heap->used();
 718     return (used - _bytes_allocated_after_last_gc) * 100 / capacity > ShenandoahAllocationThreshold;
 719   }
 720 };
 721 
 722 ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
 723   _cycle_counter(0),
 724   _successful_cm(0),
 725   _degenerated_cm(0)
 726 {
 727 
 728   ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());
 729 
 730   initialize_all();
 731 
 732   _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
 733   _stw_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
 734   _conc_timer = new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer();
 735   _user_requested_gcs = 0;
 736   _allocation_failure_gcs = 0;
 737   _conc_gc_aborted = false;
 738 
 739   _phase_names[total_pause]                     = "Total Pauses (N)";
 740   _phase_names[total_pause_gross]               = "Total Pauses (G)";
 741   _phase_names[init_mark]                       = "Initial Mark Pauses (N)";
 742   _phase_names[init_mark_gross]                 = "Initial Mark Pauses (G)";
 743   _phase_names[final_mark]                      = "Final Mark Pauses (N)";
 744   _phase_names[final_mark_gross]                = "Final Mark Pauses (G)";
 745   _phase_names[accumulate_stats]                = "  Accumulate Stats";
 746   _phase_names[make_parsable]                   = "  Make Parsable";
 747   _phase_names[clear_liveness]                  = "  Clear Liveness";
 748   _phase_names[finish_queues]                   = "  Finish Queues";
 749   _phase_names[weakrefs]                        = "  Weak References";
 750   _phase_names[class_unloading]                 = "  Class Unloading";
 751   _phase_names[prepare_evac]                    = "  Prepare Evacuation";
 752 
 753   _phase_names[scan_roots]                      = "  Scan Roots";
 754   _phase_names[scan_thread_roots]               = "    S: Thread Roots";
 755   _phase_names[scan_code_roots]                 = "    S: Code Cache Roots";
 756   _phase_names[scan_string_table_roots]         = "    S: String Table Roots";
 757   _phase_names[scan_universe_roots]             = "    S: Universe Roots";
 758   _phase_names[scan_jni_roots]                  = "    S: JNI Roots";
 759   _phase_names[scan_jni_weak_roots]             = "    S: JNI Weak Roots";
 760   _phase_names[scan_synchronizer_roots]         = "    S: Synchronizer Roots";
 761   _phase_names[scan_flat_profiler_roots]        = "    S: Flat Profiler Roots";
 762   _phase_names[scan_management_roots]           = "    S: Management Roots";
 763   _phase_names[scan_system_dictionary_roots]    = "    S: System Dict Roots";
 764   _phase_names[scan_cldg_roots]                 = "    S: CLDG Roots";
 765   _phase_names[scan_jvmti_roots]                = "    S: JVMTI Roots";
 766 
 767   _phase_names[update_roots]                    = "  Update Roots";
 768   _phase_names[update_thread_roots]             = "    U: Thread Roots";
 769   _phase_names[update_code_roots]               = "    U: Code Cache Roots";
 770   _phase_names[update_string_table_roots]       = "    U: String Table Roots";
 771   _phase_names[update_universe_roots]           = "    U: Universe Roots";
 772   _phase_names[update_jni_roots]                = "    U: JNI Roots";
 773   _phase_names[update_jni_weak_roots]           = "    U: JNI Weak Roots";
 774   _phase_names[update_synchronizer_roots]       = "    U: Synchronizer Roots";
 775   _phase_names[update_flat_profiler_roots]      = "    U: Flat Profiler Roots";
 776   _phase_names[update_management_roots]         = "    U: Management Roots";
 777   _phase_names[update_system_dictionary_roots]  = "    U: System Dict Roots";
 778   _phase_names[update_cldg_roots]               = "    U: CLDG Roots";
 779   _phase_names[update_jvmti_roots]              = "    U: JVMTI Roots";
 780 
 781   _phase_names[init_evac]                       = "  Initial Evacuation";
 782   _phase_names[evac_thread_roots]               = "    E: Thread Roots";
 783   _phase_names[evac_code_roots]                 = "    E: Code Cache Roots";
 784   _phase_names[evac_string_table_roots]         = "    E: String Table Roots";
 785   _phase_names[evac_universe_roots]             = "    E: Universe Roots";
 786   _phase_names[evac_jni_roots]                  = "    E: JNI Roots";
 787   _phase_names[evac_jni_weak_roots]             = "    E: JNI Weak Roots";
 788   _phase_names[evac_synchronizer_roots]         = "    E: Synchronizer Roots";
 789   _phase_names[evac_flat_profiler_roots]        = "    E: Flat Profiler Roots";
 790   _phase_names[evac_management_roots]           = "    E: Management Roots";
 791   _phase_names[evac_system_dictionary_roots]    = "    E: System Dict Roots";
 792   _phase_names[evac_cldg_roots]                 = "    E: CLDG Roots";
 793   _phase_names[evac_jvmti_roots]                = "    E: JVMTI Roots";
 794 
 795   _phase_names[recycle_regions]                 = "  Recycle regions";
 796   _phase_names[reset_bitmaps]                   = "Reset Bitmaps";
 797   _phase_names[resize_tlabs]                    = "Resize TLABs";
 798 
 799   _phase_names[full_gc]                         = "Full GC";
 800   _phase_names[full_gc_heapdumps]               = "  Heap Dumps";
 801   _phase_names[full_gc_prepare]                 = "  Prepare";
 802   _phase_names[full_gc_mark]                    = "  Mark";
 803   _phase_names[full_gc_mark_finish_queues]      = "    Finish Queues";
 804   _phase_names[full_gc_mark_weakrefs]           = "    Weak References";
 805   _phase_names[full_gc_mark_class_unloading]    = "    Class Unloading";
 806   _phase_names[full_gc_calculate_addresses]     = "  Calculate Addresses";
 807   _phase_names[full_gc_adjust_pointers]         = "  Adjust Pointers";
 808   _phase_names[full_gc_copy_objects]            = "  Copy Objects";
 809 
 810   _phase_names[partial_gc_gross]                = "Pause Partial GC (G)";
 811   _phase_names[partial_gc]                      = "Pause Partial GC (N)";
 812   _phase_names[partial_gc_prepare]              = "  Prepare";
 813   _phase_names[partial_gc_work]                 = "  Work";
 814   _phase_names[partial_gc_thread_roots]         = "    P: Thread Roots";
 815   _phase_names[partial_gc_code_roots]           = "    P: Code Cache Roots";
 816   _phase_names[partial_gc_string_table_roots]   = "    P: String Table Roots";
 817   _phase_names[partial_gc_universe_roots]       = "    P: Universe Roots";
 818   _phase_names[partial_gc_jni_roots]            = "    P: JNI Roots";
 819   _phase_names[partial_gc_jni_weak_roots]       = "    P: JNI Weak Roots";
 820   _phase_names[partial_gc_synchronizer_roots]   = "    P: Synchronizer Roots";
 821   _phase_names[partial_gc_flat_profiler_roots]  = "    P: Flat Profiler Roots";
 822   _phase_names[partial_gc_management_roots]     = "    P: Management Roots";
 823   _phase_names[partial_gc_system_dict_roots]    = "    P: System Dict Roots";
 824   _phase_names[partial_gc_cldg_roots]           = "    P: CLDG Roots";
 825   _phase_names[partial_gc_jvmti_roots]          = "    P: JVMTI Roots";
 826   _phase_names[partial_gc_recycle]              = "  Recycle";
 827 
 828   _phase_names[conc_mark]                       = "Concurrent Marking";
 829   _phase_names[conc_evac]                       = "Concurrent Evacuation";
 830 
 831   _phase_names[init_update_refs_gross]          = "Pause Init  Update Refs (G)";
 832   _phase_names[init_update_refs]                = "Pause Init  Update Refs (N)";
 833   _phase_names[conc_update_refs]                = "Concurrent Update Refs";
 834   _phase_names[final_update_refs_gross]         = "Pause Final Update Refs (G)";
 835   _phase_names[final_update_refs]               = "Pause Final Update Refs (N)";
 836 
 837   _phase_names[final_update_refs_roots]                = "  Update Roots";
 838   _phase_names[final_update_refs_thread_roots]         = "    UR: Thread Roots";
 839   _phase_names[final_update_refs_code_roots]           = "    UR: Code Cache Roots";
 840   _phase_names[final_update_refs_string_table_roots]   = "    UR: String Table Roots";
 841   _phase_names[final_update_refs_universe_roots]       = "    UR: Universe Roots";
 842   _phase_names[final_update_refs_jni_roots]            = "    UR: JNI Roots";
 843   _phase_names[final_update_refs_jni_weak_roots]       = "    UR: JNI Weak Roots";
 844   _phase_names[final_update_refs_synchronizer_roots]   = "    UR: Synchronizer Roots";
 845   _phase_names[final_update_refs_flat_profiler_roots]  = "    UR: Flat Profiler Roots";
 846   _phase_names[final_update_refs_management_roots]     = "    UR: Management Roots";
 847   _phase_names[final_update_refs_system_dict_roots]    = "    UR: System Dict Roots";
 848   _phase_names[final_update_refs_cldg_roots]           = "    UR: CLDG Roots";
 849   _phase_names[final_update_refs_jvmti_roots]          = "    UR: JVMTI Roots";
 850 
 851   if (ShenandoahGCHeuristics != NULL) {
 852     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
 853       log_info(gc, init)("Shenandoah heuristics: aggressive");
 854       _heuristics = new AggressiveHeuristics();
 855     } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
 856       log_info(gc, init)("Shenandoah heuristics: dynamic");
 857       _heuristics = new DynamicHeuristics();
 858     } else if (strcmp(ShenandoahGCHeuristics, "global") == 0) {
 859       log_info(gc, init)("Shenandoah heuristics: global");
 860       _heuristics = new GlobalHeuristics();
 861     } else if (strcmp(ShenandoahGCHeuristics, "ratio") == 0) {
 862       log_info(gc, init)("Shenandoah heuristics: ratio");
 863       _heuristics = new RatioHeuristics();
 864     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 865       log_info(gc, init)("Shenandoah heuristics: adaptive");
 866       _heuristics = new AdaptiveHeuristics();
 867     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 868       log_info(gc, init)("Shenandoah heuristics: passive");
 869       _heuristics = new PassiveHeuristics();
 870     } else if (strcmp(ShenandoahGCHeuristics, "connections") == 0) {
 871       log_info(gc, init)("Shenandoah heuristics: connections");
 872       _heuristics = new ConnectionHeuristics();
 873     } else if (strcmp(ShenandoahGCHeuristics, "partial") == 0) {
 874       log_info(gc, init)("Shenandoah heuristics: partial GC");
 875       _heuristics = new PartialHeuristics();
 876     } else {
 877       vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
 878     }
 879     _heuristics->print_thresholds();
 880   } else {
 881       ShouldNotReachHere();
 882   }
 883   _phase_times = new ShenandoahPhaseTimes(MAX2(ConcGCThreads, ParallelGCThreads));
 884 }
 885 
 886 ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
 887   return this;
 888 }
 889 
 890 BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
 891   return BarrierSet::ShenandoahBarrierSet;
 892 }
 893 
 894 HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
 895                                                        bool is_tlab,
 896                                                        bool* gc_overhead_limit_was_exceeded) {
 897   guarantee(false, "Not using this policy feature yet.");
 898   return NULL;
 899 }
 900 
 901 HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
 902   guarantee(false, "Not using this policy feature yet.");
 903   return NULL;
 904 }
 905 
 906 void ShenandoahCollectorPolicy::initialize_alignments() {
 907 
 908   // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
 909   _space_alignment = ShenandoahHeapRegion::region_size_bytes();
 910   _heap_alignment = ShenandoahHeapRegion::region_size_bytes();
 911 }
 912 
 913 void ShenandoahCollectorPolicy::post_heap_initialize() {
 914   // Nothing to do here (yet).
 915 }
 916 
 917 void ShenandoahCollectorPolicy::record_bytes_allocated(size_t bytes) {
 918   _heuristics->record_bytes_allocated(bytes);
 919 }
 920 
 921 void ShenandoahCollectorPolicy::record_bytes_start_CM(size_t bytes) {
 922   _heuristics->record_bytes_start_CM(bytes);
 923 }
 924 
 925 void ShenandoahCollectorPolicy::record_bytes_end_CM(size_t bytes) {
 926   _heuristics->record_bytes_end_CM(bytes);
 927 }
 928 
 929 void ShenandoahCollectorPolicy::record_bytes_reclaimed(size_t bytes) {
 930   _heuristics->record_bytes_reclaimed(bytes);
 931 }
 932 
 933 void ShenandoahCollectorPolicy::record_user_requested_gc() {
 934   _user_requested_gcs++;
 935 }
 936 
 937 void ShenandoahCollectorPolicy::record_allocation_failure_gc() {
 938   _allocation_failure_gcs++;
 939 }
 940 
 941 bool ShenandoahCollectorPolicy::should_start_concurrent_mark(size_t used,
 942                                                              size_t capacity) {
 943   return _heuristics->should_start_concurrent_mark(used, capacity);
 944 }
 945 
 946 bool ShenandoahCollectorPolicy::handover_cancelled_marking() {
 947   return _heuristics->handover_cancelled_marking();
 948 }
 949 
 950 bool ShenandoahCollectorPolicy::update_refs_early() {
 951   return _heuristics->update_refs_early();
 952 }
 953 
 954 void ShenandoahCollectorPolicy::record_cm_success() {
 955   _heuristics->record_cm_success();
 956   _successful_cm++;
 957 }
 958 
 959 void ShenandoahCollectorPolicy::record_cm_degenerated() {
 960   _degenerated_cm++;
 961 }
 962 
 963 void ShenandoahCollectorPolicy::record_cm_cancelled() {
 964   _heuristics->record_cm_cancelled();
 965 }
 966 
 967 void ShenandoahCollectorPolicy::record_full_gc() {
 968   _heuristics->record_full_gc();
 969 }
 970 
 971 void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
 972   _heuristics->choose_collection_set(collection_set, connections);
 973 }
 974 
 975 void ShenandoahCollectorPolicy::choose_free_set(ShenandoahFreeSet* free_set) {
 976    _heuristics->choose_free_set(free_set);
 977 }
 978 
 979 
 980 bool ShenandoahCollectorPolicy::process_references() {
 981   return _heuristics->process_references();
 982 }
 983 
 984 bool ShenandoahCollectorPolicy::unload_classes() {
 985   return _heuristics->unload_classes();
 986 }
 987 
 988 void ShenandoahCollectorPolicy::print_tracing_info(outputStream* out) {
 989   out->cr();
 990   out->print_cr("GC STATISTICS:");
 991   out->print_cr("  \"(G)\" (gross) pauses include time to safepoint. \"(N)\" (net) pauses are times spent in GC.");
 992   out->print_cr("  \"a\" is average time for each phase, look at levels to see if average makes sense.");
 993   out->print_cr("  \"lvls\" are quantiles: 0%% (minimum), 25%%, 50%% (median), 75%%, 100%% (maximum).");
 994   out->cr();
 995 
 996   for (uint i = 0; i < _num_phases; i++) {
 997     if (_timing_data[i]._secs.maximum() != 0) {
 998       print_summary_sd(out, _phase_names[i], &(_timing_data[i]._secs));
 999     }
1000   }
1001 
1002   out->cr();
1003   out->print_cr("" SIZE_FORMAT " allocation failure and " SIZE_FORMAT " user requested GCs", _allocation_failure_gcs, _user_requested_gcs);
1004   out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated concurrent markings", _successful_cm, _degenerated_cm);
1005   out->cr();
1006 }
1007 
1008 void ShenandoahCollectorPolicy::print_summary_sd(outputStream* out, const char* str, const HdrSeq* seq)  {
1009   out->print_cr("%-27s = %8.2lf s (a = %8.0lf us) (n = "INT32_FORMAT_W(5)") (lvls, us = %8.0lf, %8.0lf, %8.0lf, %8.0lf, %8.0lf)",
1010           str,
1011           seq->sum(),
1012           seq->avg() * 1000000.0,
1013           seq->num(),
1014           seq->percentile(0)  * 1000000.0,
1015           seq->percentile(25) * 1000000.0,
1016           seq->percentile(50) * 1000000.0,
1017           seq->percentile(75) * 1000000.0,
1018           seq->maximum() * 1000000.0
1019   );
1020 }
1021 
1022 void ShenandoahCollectorPolicy::increase_cycle_counter() {
1023   _cycle_counter++;
1024 }
1025 
1026 size_t ShenandoahCollectorPolicy::cycle_counter() const {
1027   return _cycle_counter;
1028 }
1029 
1030  ShenandoahPhaseTimes* ShenandoahCollectorPolicy::phase_times() {
1031   return _phase_times;
1032 }
1033 
1034 
1035 uint ShenandoahCollectorPolicy::calc_workers_for_java_threads(uint application_workers) {
1036   return (uint)(ShenandoahGCWorkerPerJavaThread * application_workers);
1037 }
1038 
1039 uint ShenandoahCollectorPolicy::calc_workers_for_live_set(size_t live_data) {
1040   return (uint)(live_data / HeapSizePerGCThread);
1041 }
1042 
1043 
1044 uint ShenandoahCollectorPolicy::calc_default_active_workers(
1045                                                      uint total_workers,
1046                                                      uint min_workers,
1047                                                      uint active_workers,
1048                                                      uint application_workers,
1049                                                      uint  workers_by_java_threads,
1050                                                      uint  workers_by_liveset) {
1051   // If the user has turned off using a dynamic number of GC threads
1052   // or the users has requested a specific number, set the active
1053   // number of workers to all the workers.
1054   uint new_active_workers = total_workers;
1055   uint prev_active_workers = active_workers;
1056   uint active_workers_by_JT = 0;
1057   uint active_workers_by_liveset = 0;
1058 
1059   active_workers_by_JT = MAX2(workers_by_java_threads, min_workers);
1060 
1061   // Choose a number of GC threads based on the live set.
1062   active_workers_by_liveset =
1063       MAX2((uint) 2U, workers_by_liveset);
1064 
1065   uint max_active_workers =
1066     MAX2(active_workers_by_JT, active_workers_by_liveset);
1067 
1068   new_active_workers = MIN2(max_active_workers, new_active_workers);
1069 
1070   // Increase GC workers instantly but decrease them more
1071   // slowly.
1072   if (new_active_workers < prev_active_workers) {
1073     new_active_workers =
1074       MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
1075   }
1076 
1077   if (UseNUMA) {
1078     uint numa_groups = (uint)os::numa_get_groups_num();
1079     assert(numa_groups <= total_workers, "Not enough workers to cover all numa groups");
1080     new_active_workers = MAX2(new_active_workers, numa_groups);
1081   }
1082 
1083   // Check once more that the number of workers is within the limits.
1084   assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
1085   assert(new_active_workers >= min_workers, "Minimum workers not observed");
1086   assert(new_active_workers <= total_workers, "Total workers not observed");
1087 
1088   log_trace(gc, task)("ShenandoahCollectorPolicy::calc_default_active_workers() : "
1089      "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
1090      "prev_active_workers: " UINTX_FORMAT "\n"
1091      " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_liveset: " UINTX_FORMAT,
1092      (uintx)active_workers, (uintx)new_active_workers, (uintx)prev_active_workers,
1093      (uintx)active_workers_by_JT, (uintx)active_workers_by_liveset);
1094   assert(new_active_workers > 0, "Always need at least 1");
1095   return new_active_workers;
1096 }
1097 
1098 /**
1099  * Initial marking phase also update references of live objects from previous concurrent GC cycle,
1100  * so we take Java threads and live set into account.
1101  */
1102 uint ShenandoahCollectorPolicy::calc_workers_for_init_marking(uint active_workers,
1103                                             uint application_workers) {
1104 
1105   if (!UseDynamicNumberOfGCThreads ||
1106      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1107     assert(ParallelGCThreads > 0, "Always need at least 1");
1108     return ParallelGCThreads;
1109   } else {
1110     ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)ShenandoahHeap::heap()->collector_policy();
1111     size_t live_data = policy->_heuristics->bytes_in_cset();
1112 
1113     return calc_default_active_workers(ParallelGCThreads, (ParallelGCThreads > 1) ? 2 : 1,
1114       active_workers, application_workers,
1115       calc_workers_for_java_threads(application_workers),
1116       calc_workers_for_live_set(live_data));
1117   }
1118 }
1119 
1120 uint ShenandoahCollectorPolicy::calc_workers_for_conc_marking(uint active_workers,
1121                                             uint application_workers) {
1122 
1123   if (!UseDynamicNumberOfGCThreads ||
1124      (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1125     assert(ConcGCThreads > 0, "Always need at least 1");
1126     return ConcGCThreads;
1127   } else {
1128     return calc_default_active_workers(ConcGCThreads,
1129       (ConcGCThreads > 1 ? 2 : 1), active_workers,
1130       application_workers, calc_workers_for_java_threads(application_workers), 0);
1131   }
1132 }
1133 
1134 uint ShenandoahCollectorPolicy::calc_workers_for_final_marking(uint active_workers,
1135                                             uint application_workers) {
1136 
1137   if (!UseDynamicNumberOfGCThreads ||
1138      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1139     assert(ParallelGCThreads > 0, "Always need at least 1");
1140     return ParallelGCThreads;
1141   } else {
1142     return calc_default_active_workers(ParallelGCThreads,
1143       (ParallelGCThreads > 1 ? 2 : 1), active_workers,
1144       application_workers, calc_workers_for_java_threads(application_workers), 0);
1145   }
1146 }
1147 
1148   // Calculate workers for concurrent evacuation (concurrent GC)
1149 uint ShenandoahCollectorPolicy::calc_workers_for_conc_evacuation(uint active_workers,
1150                                             uint application_workers) {
1151   if (!UseDynamicNumberOfGCThreads ||
1152      (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1153     assert(ConcGCThreads > 0, "Always need at least 1");
1154     return ConcGCThreads;
1155   } else {
1156     return calc_workers_for_evacuation(false, // not a full GC
1157       ConcGCThreads, active_workers, application_workers);
1158   }
1159 }
1160 
1161   // Calculate workers for parallel evaculation (full GC)
1162 uint ShenandoahCollectorPolicy::calc_workers_for_parallel_evacuation(uint active_workers,
1163                                             uint application_workers) {
1164   if (!UseDynamicNumberOfGCThreads ||
1165      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1166     assert(ParallelGCThreads > 0, "Always need at least 1");
1167     return ParallelGCThreads;
1168   } else {
1169     return calc_workers_for_evacuation(true, // a full GC
1170       ParallelGCThreads, active_workers, application_workers);
1171   }
1172 }
1173 
1174 
1175 uint ShenandoahCollectorPolicy::calc_workers_for_evacuation(bool full_gc,
1176                                             uint total_workers,
1177                                             uint active_workers,
1178                                             uint application_workers) {
1179 
1180   // Calculation based on live set
1181   size_t live_data = 0;
1182   ShenandoahHeap* heap = ShenandoahHeap::heap();
1183   if (full_gc) {
1184     ShenandoahHeapRegionSet* regions = heap->regions();
1185     for (size_t index = 0; index < regions->active_regions(); index ++) {
1186       live_data += regions->get_fast(index)->get_live_data_bytes();
1187     }
1188   } else {
1189     ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)heap->collector_policy();
1190     live_data = policy->_heuristics->bytes_in_cset();
1191   }
1192 
1193   uint active_workers_by_liveset = calc_workers_for_live_set(live_data);
1194   return calc_default_active_workers(total_workers,
1195       (total_workers > 1 ? 2 : 1), active_workers,
1196       application_workers, 0, active_workers_by_liveset);
1197 }
1198 
1199 bool ShenandoahCollectorPolicy::should_start_partial_gc() {
1200   return _heuristics->should_start_partial_gc();
1201 }