1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "gc/shared/gcPolicyCounters.hpp"
  25 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  26 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  27 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  28 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  29 #include "gc/shenandoah/shenandoahPhaseTimes.hpp"
  30 #include "runtime/os.hpp"
  31 
  32 class ShenandoahHeuristics : public CHeapObj<mtGC> {
  33 
  34   NumberSeq _allocation_rate_bytes;
  35   NumberSeq _reclamation_rate_bytes;
  36 
  37   size_t _bytes_allocated_since_CM;
  38   size_t _bytes_reclaimed_this_cycle;
  39 
  40 protected:
  41   typedef struct {
  42     size_t region_number;
  43     size_t garbage;
  44   } RegionGarbage;
  45 
  46   static int compare_by_garbage(RegionGarbage a, RegionGarbage b) {
  47     if (a.garbage > b.garbage)
  48       return -1;
  49     else if (b.garbage < a.garbage)
  50       return 1;
  51     else return 0;
  52   }
  53 
  54   RegionGarbage* _region_garbage;
  55   size_t _region_garbage_size;
  56 
  57   size_t _bytes_allocated_start_CM;
  58   size_t _bytes_allocated_during_CM;
  59 
  60   uint _cancelled_cm_cycles_in_a_row;
  61   uint _successful_cm_cycles_in_a_row;
  62 
  63   size_t _bytes_in_cset;
  64 
  65 public:
  66 
  67   ShenandoahHeuristics();
  68   ~ShenandoahHeuristics();
  69 
  70   void record_bytes_allocated(size_t bytes);
  71   void record_bytes_reclaimed(size_t bytes);
  72   void record_bytes_start_CM(size_t bytes);
  73   void record_bytes_end_CM(size_t bytes);
  74 
  75   size_t bytes_in_cset() const { return _bytes_in_cset; }
  76 
  77   virtual void print_thresholds() {
  78   }
  79 
  80   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0;
  81 
  82   virtual bool handover_cancelled_marking() {
  83     return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold;
  84   }
  85 
  86   virtual void record_cm_cancelled() {
  87     _cancelled_cm_cycles_in_a_row++;
  88     _successful_cm_cycles_in_a_row = 0;
  89   }
  90 
  91   virtual void record_cm_success() {
  92     _cancelled_cm_cycles_in_a_row = 0;
  93     _successful_cm_cycles_in_a_row++;
  94   }
  95 
  96   virtual void record_full_gc() {
  97     _bytes_in_cset = 0;
  98   }
  99 
 100   virtual void start_choose_collection_set() {
 101   }
 102   virtual void end_choose_collection_set() {
 103   }
 104   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) = 0;
 105 
 106   virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections=NULL);
 107   virtual void choose_free_set(ShenandoahFreeSet* free_set);
 108 
 109   virtual bool process_references() {
 110     if (ShenandoahRefProcFrequency == 0) return false;
 111     size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
 112     // Process references every Nth GC cycle.
 113     return cycle % ShenandoahRefProcFrequency == 0;
 114   }
 115 
 116   virtual bool unload_classes() {
 117     if (ShenandoahUnloadClassesFrequency == 0) return false;
 118     size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
 119     // Unload classes every Nth GC cycle.
 120     // This should not happen in the same cycle as process_references to amortize costs.
 121     // Offsetting by one is enough to break the rendezvous when periods are equal.
 122     // When periods are not equal, offsetting by one is just as good as any other guess.
 123     return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
 124   }
 125 
 126   virtual bool needs_regions_sorted_by_garbage() {
 127     // Most of them do not.
 128     return false;
 129   }
 130 };
 131 
 132 ShenandoahHeuristics::ShenandoahHeuristics() :
 133   _bytes_allocated_since_CM(0),
 134   _bytes_reclaimed_this_cycle(0),
 135   _bytes_allocated_start_CM(0),
 136   _bytes_allocated_during_CM(0),
 137   _bytes_in_cset(0),
 138   _cancelled_cm_cycles_in_a_row(0),
 139   _successful_cm_cycles_in_a_row(0),
 140   _region_garbage(NULL),
 141   _region_garbage_size(0)
 142 {
 143 }
 144 
 145 ShenandoahHeuristics::~ShenandoahHeuristics() {
 146   if (_region_garbage != NULL) {
 147     FREE_C_HEAP_ARRAY(RegionGarbage, _region_garbage);
 148   }
 149 }
 150 
 151 void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
 152   start_choose_collection_set();
 153 
 154   ShenandoahHeap* heap = ShenandoahHeap::heap();
 155 
 156   // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.
 157 
 158   ShenandoahHeapRegionSet* regions = heap->regions();
 159   size_t active = regions->active_regions();
 160 
 161   RegionGarbage* candidates = _region_garbage;
 162   if (candidates == NULL) {
 163     candidates = NEW_C_HEAP_ARRAY(RegionGarbage, active, mtGC);
 164     _region_garbage_size = active;
 165   } else if (_region_garbage_size < active) {
 166     REALLOC_C_HEAP_ARRAY(RegionGarbage, _region_garbage, active, mtGC);
 167     _region_garbage_size = active;
 168   }
 169 
 170   size_t cand_idx = 0;
 171   _bytes_in_cset = 0;
 172 
 173   size_t immediate_garbage = 0;
 174   size_t immediate_regions = 0;
 175   for (size_t i = 0; i < active; i++) {
 176     ShenandoahHeapRegion* region = regions->get(i);
 177 
 178     if (! region->is_humongous() && ! region->is_pinned()) {
 179       if ((! region->is_empty()) && ! region->has_live()) {
 180         // We can recycle it right away and put it in the free set.
 181         immediate_regions++;
 182         immediate_garbage += region->garbage();
 183         heap->decrease_used(region->used());
 184         region->recycle();
 185         log_develop_trace(gc)("Choose region " SIZE_FORMAT " for immediate reclaim with garbage = " SIZE_FORMAT
 186                               " and live = " SIZE_FORMAT "\n",
 187                               region->region_number(), region->garbage(), region->get_live_data_bytes());
 188       } else {
 189         // This is our candidate for later consideration.
 190         candidates[cand_idx].region_number = region->region_number();
 191         candidates[cand_idx].garbage = region->garbage();
 192         cand_idx++;
 193       }
 194     } else {
 195       assert(region->has_live() || region->is_empty() || region->is_pinned() || region->is_humongous(), "check rejected");
 196       log_develop_trace(gc)("Rejected region " SIZE_FORMAT " with garbage = " SIZE_FORMAT
 197                             " and live = " SIZE_FORMAT "\n",
 198                             region->region_number(), region->garbage(), region->get_live_data_bytes());
 199     }
 200   }
 201 
 202   // Step 2. Process the remanining candidates, if any.
 203 
 204   if (cand_idx > 0) {
 205     if (needs_regions_sorted_by_garbage()) {
 206       QuickSort::sort<RegionGarbage>(candidates, cand_idx, compare_by_garbage, false);
 207     }
 208 
 209     for (size_t i = 0; i < cand_idx; i++) {
 210       ShenandoahHeapRegion *region = regions->get_fast(candidates[i].region_number);
 211       if (region_in_collection_set(region, immediate_garbage)) {
 212         log_develop_trace(gc)("Choose region " SIZE_FORMAT " with garbage = " SIZE_FORMAT
 213                                       " and live = " SIZE_FORMAT "\n",
 214                               region->region_number(), region->garbage(), region->get_live_data_bytes());
 215         collection_set->add_region(region);
 216         region->set_in_collection_set(true);
 217         _bytes_in_cset += region->used();
 218       }
 219     }
 220   }
 221 
 222   end_choose_collection_set();
 223 
 224   size_t total_garbage = heap->garbage();
 225   log_debug(gc)("Total Garbage: "SIZE_FORMAT, total_garbage);
 226   log_debug(gc)("Immediate Garbage: "SIZE_FORMAT, immediate_garbage);
 227   log_debug(gc)("Immediate Garbage regions: "SIZE_FORMAT, immediate_regions);
 228   log_debug(gc)("Garbage to be collected: "SIZE_FORMAT, collection_set->garbage());
 229   log_debug(gc)("Objects to be evacuated: "SIZE_FORMAT, collection_set->live_data());
 230   log_debug(gc)("Live / Garbage ratio: "SIZE_FORMAT"%%", collection_set->live_data() * 100 / MAX2(collection_set->garbage(), 1UL));
 231   log_debug(gc)("Collected-Garbage ratio / Total-garbage: "SIZE_FORMAT"%%", collection_set->garbage() * 100 / MAX2(total_garbage, 1UL));
 232 }
 233 
 234 void ShenandoahHeuristics::choose_free_set(ShenandoahFreeSet* free_set) {
 235 
 236   ShenandoahHeapRegionSet* ordered_regions = ShenandoahHeap::heap()->regions();
 237   size_t i = 0;
 238   size_t end = ordered_regions->active_regions();
 239 
 240   ShenandoahHeap* heap = ShenandoahHeap::heap();
 241   while (i < end) {
 242     ShenandoahHeapRegion* region = ordered_regions->get(i++);
 243     if ((! heap->in_collection_set(region))
 244         && (! region->is_humongous())
 245         && (! region->is_pinned())) {
 246       free_set->add_region(region);
 247     }
 248   }
 249 }
 250 
 251 void ShenandoahCollectorPolicy::record_workers_start(TimingPhase phase) {
 252   for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
 253     _phase_times->reset(i);
 254   }
 255 }
 256 
 257 void ShenandoahCollectorPolicy::record_workers_end(TimingPhase phase) {
 258   if (phase != _num_phases) {
 259     for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
 260       double t = _phase_times->average(i);
 261       _timing_data[phase + i]._secs.add(t);
 262     }
 263   }
 264 }
 265 
 266 void ShenandoahCollectorPolicy::record_phase_start(TimingPhase phase) {
 267   _timing_data[phase]._start = os::elapsedTime();
 268 
 269 }
 270 
 271 void ShenandoahCollectorPolicy::record_phase_end(TimingPhase phase) {
 272   double end = os::elapsedTime();
 273   double elapsed = end - _timing_data[phase]._start;
 274   _timing_data[phase]._secs.add(elapsed);
 275 }
 276 
// Hook invoked when a concurrent GC is cancelled; intentionally a no-op here.
void ShenandoahCollectorPolicy::report_concgc_cancelled() {
}
 279 
// Record an allocation counter sample and feed the allocation-rate sequence.
void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
  _bytes_allocated_since_CM = bytes;
  // NOTE(review): this also overwrites the CM-start marker with the same
  // sample, which record_bytes_start_CM() exists to do — confirm this is
  // intended and not a copy-paste remnant.
  _bytes_allocated_start_CM = bytes;
  _allocation_rate_bytes.add(bytes);
}
 285 
// Record the bytes reclaimed this cycle and feed the reclamation-rate sequence.
void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
  _bytes_reclaimed_this_cycle = bytes;
  _reclamation_rate_bytes.add(bytes);
}
 290 
// Snapshot the allocation counter at the start of concurrent marking.
void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
  _bytes_allocated_start_CM = bytes;
}
 294 
 295 void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
 296   _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)
 297                                                                    : bytes;
 298 }
 299 
 300 class PassiveHeuristics : public ShenandoahHeuristics {
 301 public:
 302   PassiveHeuristics() : ShenandoahHeuristics() {
 303   }
 304 
 305   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 306     return r->garbage() > 0;
 307   }
 308 
 309   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 310     // Never do concurrent GCs.
 311     return false;
 312   }
 313 
 314   virtual bool process_references() {
 315     // Randomly process refs with 50% chance.
 316     return (os::random() & 1) == 1;
 317   }
 318 
 319   virtual bool unload_classes() {
 320     // Randomly unload classes with 50% chance.
 321     return (os::random() & 1) == 1;
 322   }
 323 };
 324 
 325 class AggressiveHeuristics : public ShenandoahHeuristics {
 326 public:
 327   AggressiveHeuristics() : ShenandoahHeuristics() {
 328   }
 329 
 330   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 331     return r->garbage() > 0;
 332   }
 333 
 334   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 335     return true;
 336   }
 337 
 338   virtual bool process_references() {
 339     // Randomly process refs with 50% chance.
 340     return (os::random() & 1) == 1;
 341   }
 342 
 343   virtual bool unload_classes() {
 344     // Randomly unload classes with 50% chance.
 345     return (os::random() & 1) == 1;
 346   }
 347 };
 348 
 349 class DynamicHeuristics : public ShenandoahHeuristics {
 350 public:
 351   DynamicHeuristics() : ShenandoahHeuristics() {
 352   }
 353 
 354   void print_thresholds() {
 355     log_info(gc, init)("Shenandoah heuristics thresholds: allocation "SIZE_FORMAT", free "SIZE_FORMAT", garbage "SIZE_FORMAT,
 356                        ShenandoahAllocationThreshold,
 357                        ShenandoahFreeThreshold,
 358                        ShenandoahGarbageThreshold);
 359   }
 360 
 361   virtual ~DynamicHeuristics() {}
 362 
 363   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 364 
 365     bool shouldStartConcurrentMark = false;
 366 
 367     ShenandoahHeap* heap = ShenandoahHeap::heap();
 368     size_t free_capacity = heap->free_regions()->capacity();
 369     size_t free_used = heap->free_regions()->used();
 370     assert(free_used <= free_capacity, "must use less than capacity");
 371     size_t cset = MIN2(_bytes_in_cset, (ShenandoahCSetThreshold * capacity) / 100);
 372     size_t available =  free_capacity - free_used + cset;
 373     uintx threshold = ShenandoahFreeThreshold + ShenandoahCSetThreshold;
 374     size_t targetStartMarking = (capacity * threshold) / 100;
 375 
 376     size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
 377     if (available < targetStartMarking &&
 378         heap->bytes_allocated_since_cm() > threshold_bytes_allocated)
 379     {
 380       // Need to check that an appropriate number of regions have
 381       // been allocated since last concurrent mark too.
 382       shouldStartConcurrentMark = true;
 383     }
 384 
 385     return shouldStartConcurrentMark;
 386   }
 387 
 388   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 389     size_t threshold = ShenandoahHeapRegion::RegionSizeBytes * ShenandoahGarbageThreshold / 100;
 390     return r->garbage() > threshold;
 391   }
 392 
 393 };
 394 
 395 
// Heuristics that adapts its free threshold to GC outcomes: cancellations
// raise the threshold (start GC earlier), a streak of successes lowers it
// again. The collection-set size history predicts how much space the
// in-flight cycle will give back.
class AdaptiveHeuristics : public ShenandoahHeuristics {
private:
  uintx _free_threshold;      // current free-space trigger, in percent of capacity
  TruncatedSeq* _cset_history; // recent cset sizes, in percent of capacity

public:
  AdaptiveHeuristics() :
    ShenandoahHeuristics(),
    _free_threshold(ShenandoahInitFreeThreshold),
    _cset_history(new TruncatedSeq(ShenandoahHappyCyclesThreshold)) {

    // Seed the history so davg() has data before the first real cycle.
    _cset_history->add((double) ShenandoahCSetThreshold);
    _cset_history->add((double) ShenandoahCSetThreshold);
  }

  virtual ~AdaptiveHeuristics() {
    delete _cset_history;
  }

  // A region qualifies when its garbage exceeds the configured percentage
  // of the region size.
  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    size_t threshold = ShenandoahHeapRegion::RegionSizeBytes * ShenandoahGarbageThreshold / 100;
    return r->garbage() > threshold;
  }

  // On cancellation, become more cautious: raise the free threshold (up to
  // the configured maximum) so the next cycle starts earlier.
  virtual void record_cm_cancelled() {
    ShenandoahHeuristics::record_cm_cancelled();
    if (_free_threshold < ShenandoahMaxFreeThreshold) {
      _free_threshold++;
      log_info(gc,ergo)("increasing free threshold to: "UINTX_FORMAT, _free_threshold);
    }
  }

  // After enough successes in a row, relax: lower the free threshold (down
  // to the configured minimum) and restart the success streak.
  virtual void record_cm_success() {
    ShenandoahHeuristics::record_cm_success();
    if (_successful_cm_cycles_in_a_row > ShenandoahHappyCyclesThreshold &&
        _free_threshold > ShenandoahMinFreeThreshold) {
      _free_threshold--;
      log_info(gc,ergo)("reducing free threshold to: "UINTX_FORMAT, _free_threshold);
      _successful_cm_cycles_in_a_row = 0;
    }
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    bool shouldStartConcurrentMark = false;

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t free_capacity = heap->free_regions()->capacity();
    size_t free_used = heap->free_regions()->used();
    assert(free_used <= free_capacity, "must use less than capacity");
    // size_t cset_threshold = (size_t) _cset_history->maximum();
    // Predict the upcoming cset size from the decayed average of history.
    size_t cset_threshold = (size_t) _cset_history->davg();
    size_t cset = MIN2(_bytes_in_cset, (cset_threshold * capacity) / 100);
    size_t available =  free_capacity - free_used + cset;
    uintx factor = _free_threshold + cset_threshold;
    size_t targetStartMarking = (capacity * factor) / 100;

    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
    if (available < targetStartMarking &&
        heap->bytes_allocated_since_cm() > threshold_bytes_allocated)
    {
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      shouldStartConcurrentMark = true;
    }

    if (shouldStartConcurrentMark) {
      log_info(gc,ergo)("predicted cset threshold: "SIZE_FORMAT, cset_threshold);
      log_info(gc,ergo)("Starting concurrent mark at "SIZE_FORMAT"K CSet ("SIZE_FORMAT"%%)", _bytes_in_cset / K, _bytes_in_cset * 100 / capacity);
      // _cset_history is a pointer member, so updating the pointee is legal
      // from this const method; record the actual cset percentage observed.
      _cset_history->add((double) (_bytes_in_cset * 100 / capacity));
    }
    return shouldStartConcurrentMark;
  }

};
 470 
 471 class GlobalHeuristics : public DynamicHeuristics {
 472 private:
 473   size_t _garbage;
 474   size_t _min_garbage;
 475 public:
 476   GlobalHeuristics() : DynamicHeuristics() {
 477     if (FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) {
 478       FLAG_SET_DEFAULT(ShenandoahGarbageThreshold, 90);
 479     }
 480   }
 481   virtual ~GlobalHeuristics() {}
 482 
 483   virtual void start_choose_collection_set() {
 484     _garbage = 0;
 485     size_t heap_garbage = ShenandoahHeap::heap()->garbage();
 486     _min_garbage =  heap_garbage * ShenandoahGarbageThreshold / 100;
 487   }
 488 
 489   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 490     if (_garbage + immediate_garbage < _min_garbage && ! r->is_empty()) {
 491       _garbage += r->garbage();
 492       return true;
 493     } else {
 494       return false;
 495     }
 496   }
 497 
 498   virtual bool needs_regions_sorted_by_garbage() {
 499     return true;
 500   }
 501 };
 502 
 503 class RatioHeuristics : public DynamicHeuristics {
 504 private:
 505   size_t _garbage;
 506   size_t _live;
 507 public:
 508   RatioHeuristics() : DynamicHeuristics() {
 509     if (FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) {
 510       FLAG_SET_DEFAULT(ShenandoahGarbageThreshold, 95);
 511     }
 512   }
 513   virtual ~RatioHeuristics() {}
 514 
 515   virtual void start_choose_collection_set() {
 516     _garbage = 0;
 517     _live = 0;
 518   }
 519 
 520   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 521     size_t min_ratio = 100 - ShenandoahGarbageThreshold;
 522     if (_live * 100 / MAX2(_garbage + immediate_garbage, 1UL) < min_ratio && ! r->is_empty()) {
 523       _garbage += r->garbage();
 524       _live += r->get_live_data_bytes();
 525       return true;
 526     } else {
 527       return false;
 528     }
 529   }
 530 
 531   virtual bool needs_regions_sorted_by_garbage() {
 532     return true;
 533   }
 534 };
 535 
 536 class ConnectionHeuristics : public ShenandoahHeuristics {
 537 private:
 538   size_t _max_live_data;
 539   double _used_threshold_factor;
 540   double _garbage_threshold_factor;
 541   double _allocation_threshold_factor;
 542 
 543   uintx _used_threshold;
 544   uintx _garbage_threshold;
 545   uintx _allocation_threshold;
 546 
 547 public:
 548   ConnectionHeuristics() : ShenandoahHeuristics() {
 549     _max_live_data = 0;
 550 
 551     _used_threshold = 0;
 552     _garbage_threshold = 0;
 553     _allocation_threshold = 0;
 554 
 555     _used_threshold_factor = 0.;
 556     _garbage_threshold_factor = 0.1;
 557     _allocation_threshold_factor = 0.;
 558   }
 559 
 560   virtual ~ConnectionHeuristics() {}
 561 
 562   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 563     size_t half_gig = 64 * 1024 * 1024;
 564     size_t bytes_alloc = ShenandoahHeap::heap()->bytes_allocated_since_cm();
 565     bool result =  bytes_alloc > half_gig;
 566     if (result) tty->print("Starting a concurrent mark");
 567     return result;
 568   }
 569 
 570   bool maybe_add_heap_region(ShenandoahHeapRegion* hr, ShenandoahCollectionSet* collection_set) {
 571     if (!hr->is_humongous() && hr->has_live() && !collection_set->contains(hr)) {
 572       collection_set->add_region_check_for_duplicates(hr);
 573       hr->set_in_collection_set(true);
 574       return true;
 575     }
 576     return false;
 577   }
 578 
 579   virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
 580     ShenandoahHeapRegionSet* regions = ShenandoahHeap::heap()->regions();
 581     size_t end = regions->active_regions();
 582     RegionGarbage sorted_by_garbage[end];
 583     for (size_t i = 0; i < end; i++) {
 584       ShenandoahHeapRegion* r = regions->get_fast(i);
 585       sorted_by_garbage[i].region_number = r->region_number();
 586       sorted_by_garbage[i].garbage = r->garbage();
 587     }
 588 
 589     QuickSort::sort<RegionGarbage>(sorted_by_garbage, end, compare_by_garbage, false);
 590 
 591     int num = ShenandoahHeap::heap()->num_regions();
 592     // simulate write heuristics by picking best region.
 593     int r = 0;
 594     ShenandoahHeapRegion* choosenOne = regions->get(sorted_by_garbage[0].region_number);
 595 
 596     while (! maybe_add_heap_region(choosenOne, collection_set)) {
 597       choosenOne = regions->get(sorted_by_garbage[++r].region_number);
 598     }
 599 
 600     int region_number = choosenOne->region_number();
 601     log_develop_trace(gc)("Adding choosen region %d\n", region_number);
 602 
 603     // Add all the regions which point to this region.
 604     for (int i = 0; i < num; i++) {
 605       if (connections[i * num + region_number] > 0) {
 606         ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
 607         if (maybe_add_heap_region(candidate, collection_set))
 608           log_develop_trace(gc)("Adding region %d which points to the choosen region\n", i);
 609       }
 610     }
 611 
 612     // Add all the regions they point to.
 613     for (size_t ci = 0; ci < collection_set->active_regions(); ci++) {
 614       ShenandoahHeapRegion* cs_heap_region = collection_set->get(ci);
 615       int cs_heap_region_number = cs_heap_region->region_number();
 616       for (int i = 0; i < num; i++) {
 617         if (connections[i * num + cs_heap_region_number] > 0) {
 618           ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
 619           if (maybe_add_heap_region(candidate, collection_set)) {
 620             log_develop_trace(gc)
 621               ("Adding region %d which is pointed to by region %d\n", i, cs_heap_region_number);
 622           }
 623         }
 624       }
 625     }
 626     _max_live_data = MAX2(_max_live_data, collection_set->live_data());
 627     collection_set->print();
 628   }
 629 
 630   virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
 631     assert(false, "Shouldn't get here");
 632     return false;
 633   }
 634 };
 635 
// Set up the collector policy: size heap regions, initialize timers and
// counters, register human-readable names for every timing phase, and pick
// the heuristics implementation named by -XX:ShenandoahGCHeuristics.
ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
  _cycle_counter(0),
  _successful_cm(0),
  _degenerated_cm(0)
{

  ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());

  initialize_all();

  // Tracer and timers live for the VM lifetime; allocated from the C heap.
  _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
  _stw_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
  _conc_timer = new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer();
  _user_requested_gcs = 0;
  _allocation_failure_gcs = 0;
  _conc_gc_aborted = false;

  // Display names for the timing table; indentation encodes sub-phases.
  _phase_names[total_pause]                     = "Total Pauses (net)";
  _phase_names[total_pause_gross]               = "Total Pauses (gross)";
  _phase_names[init_mark]                       = "Initial Mark Pauses (net)";
  _phase_names[init_mark_gross]                 = "Initial Mark Pauses (gross)";
  _phase_names[final_mark]                      = "Final Mark Pauses (net)";
  _phase_names[final_mark_gross]                = "Final Mark Pauses (gross)";
  _phase_names[accumulate_stats]                = "  Accumulate Stats";
  _phase_names[make_parsable]                   = "  Make Parsable";
  _phase_names[clear_liveness]                  = "  Clear Liveness";
  _phase_names[scan_roots]                      = "  Scan Roots";
  _phase_names[update_roots]                    = "  Update Roots";
  _phase_names[drain_satb]                      = "  Drain SATB";
  _phase_names[weakrefs]                        = "  Weak References";
  _phase_names[class_unloading]                 = "  Class Unloading";
  _phase_names[prepare_evac]                    = "  Prepare Evacuation";
  _phase_names[init_evac]                       = "  Initial Evacuation";

  // Per-root-kind breakdown of the root scan (S:), update (U:), and
  // evacuation (E:) parallel phases.
  _phase_names[scan_thread_roots]               = "    S: Thread Roots";
  _phase_names[scan_code_roots]                 = "    S: Code Cache Roots";
  _phase_names[scan_string_table_roots]         = "    S: String Table Roots";
  _phase_names[scan_universe_roots]             = "    S: Universe Roots";
  _phase_names[scan_jni_roots]                  = "    S: JNI Roots";
  _phase_names[scan_jni_weak_roots]             = "    S: JNI Weak Roots";
  _phase_names[scan_synchronizer_roots]         = "    S: Synchronizer Roots";
  _phase_names[scan_flat_profiler_roots]        = "    S: Flat Profiler Roots";
  _phase_names[scan_management_roots]           = "    S: Management Roots";
  _phase_names[scan_system_dictionary_roots]    = "    S: System Dict Roots";
  _phase_names[scan_cldg_roots]                 = "    S: CLDG Roots";
  _phase_names[scan_jvmti_roots]                = "    S: JVMTI Roots";

  _phase_names[update_thread_roots]             = "    U: Thread Roots";
  _phase_names[update_code_roots]               = "    U: Code Cache Roots";
  _phase_names[update_string_table_roots]       = "    U: String Table Roots";
  _phase_names[update_universe_roots]           = "    U: Universe Roots";
  _phase_names[update_jni_roots]                = "    U: JNI Roots";
  _phase_names[update_jni_weak_roots]           = "    U: JNI Weak Roots";
  _phase_names[update_synchronizer_roots]       = "    U: Synchronizer Roots";
  _phase_names[update_flat_profiler_roots]      = "    U: Flat Profiler Roots";
  _phase_names[update_management_roots]         = "    U: Management Roots";
  _phase_names[update_system_dictionary_roots]  = "    U: System Dict Roots";
  _phase_names[update_cldg_roots]               = "    U: CLDG Roots";
  _phase_names[update_jvmti_roots]              = "    U: JVMTI Roots";

  _phase_names[evac_thread_roots]               = "    E: Thread Roots";
  _phase_names[evac_code_roots]                 = "    E: Code Cache Roots";
  _phase_names[evac_string_table_roots]         = "    E: String Table Roots";
  _phase_names[evac_universe_roots]             = "    E: Universe Roots";
  _phase_names[evac_jni_roots]                  = "    E: JNI Roots";
  _phase_names[evac_jni_weak_roots]             = "    E: JNI Weak Roots";
  _phase_names[evac_synchronizer_roots]         = "    E: Synchronizer Roots";
  _phase_names[evac_flat_profiler_roots]        = "    E: Flat Profiler Roots";
  _phase_names[evac_management_roots]           = "    E: Management Roots";
  _phase_names[evac_system_dictionary_roots]    = "    E: System Dict Roots";
  _phase_names[evac_cldg_roots]                 = "    E: CLDG Roots";
  _phase_names[evac_jvmti_roots]                = "    E: JVMTI Roots";

  _phase_names[recycle_regions]                 = "  Recycle regions";
  _phase_names[reset_bitmaps]                   = "Reset Bitmaps";
  _phase_names[resize_tlabs]                    = "Resize TLABs";

  _phase_names[full_gc]                         = "Full GC";
  _phase_names[full_gc_heapdumps]               = "  Heap Dumps";
  _phase_names[full_gc_prepare]                 = "  Prepare";
  _phase_names[full_gc_mark]                    = "  Mark";
  _phase_names[full_gc_mark_drain_queues]       = "    Drain Queues";
  _phase_names[full_gc_mark_weakrefs]           = "    Weak References";
  _phase_names[full_gc_mark_class_unloading]    = "    Class Unloading";
  _phase_names[full_gc_calculate_addresses]     = "  Calculate Addresses";
  _phase_names[full_gc_adjust_pointers]         = "  Adjust Pointers";
  _phase_names[full_gc_copy_objects]            = "  Copy Objects";

  _phase_names[conc_mark]                       = "Concurrent Marking";
  _phase_names[conc_evac]                       = "Concurrent Evacuation";

  // Instantiate the heuristics selected on the command line; the flag has a
  // default, so reaching the NULL branch indicates broken flag processing.
  if (ShenandoahGCHeuristics != NULL) {
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      log_info(gc, init)("Shenandoah heuristics: aggressive");
      _heuristics = new AggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
      log_info(gc, init)("Shenandoah heuristics: dynamic");
      _heuristics = new DynamicHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "global") == 0) {
      log_info(gc, init)("Shenandoah heuristics: global");
      _heuristics = new GlobalHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "ratio") == 0) {
      log_info(gc, init)("Shenandoah heuristics: ratio");
      _heuristics = new RatioHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      log_info(gc, init)("Shenandoah heuristics: adaptive");
      _heuristics = new AdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      log_info(gc, init)("Shenandoah heuristics: passive");
      _heuristics = new PassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "connections") == 0) {
      log_info(gc, init)("Shenandoah heuristics: connections");
      _heuristics = new ConnectionHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }
    _heuristics->print_thresholds();
  } else {
      ShouldNotReachHere();
  }
  // Size the per-worker timing table for the larger of the two worker pools.
  _phase_times = new ShenandoahPhaseTimes(MAX2(ConcGCThreads, ParallelGCThreads));
}
 758 
 759 ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
 760   return this;
 761 }
 762 
 763 BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
 764   return BarrierSet::ShenandoahBarrierSet;
 765 }
 766 
 767 HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
 768                                                        bool is_tlab,
 769                                                        bool* gc_overhead_limit_was_exceeded) {
 770   guarantee(false, "Not using this policy feature yet.");
 771   return NULL;
 772 }
 773 
 774 HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
 775   guarantee(false, "Not using this policy feature yet.");
 776   return NULL;
 777 }
 778 
 779 void ShenandoahCollectorPolicy::initialize_alignments() {
 780 
 781   // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
 782   _space_alignment = ShenandoahHeapRegion::RegionSizeBytes;
 783   _heap_alignment = ShenandoahHeapRegion::RegionSizeBytes;
 784 }
 785 
 786 void ShenandoahCollectorPolicy::post_heap_initialize() {
 787   // Nothing to do here (yet).
 788 }
 789 
 790 void ShenandoahCollectorPolicy::record_bytes_allocated(size_t bytes) {
 791   _heuristics->record_bytes_allocated(bytes);
 792 }
 793 
 794 void ShenandoahCollectorPolicy::record_bytes_start_CM(size_t bytes) {
 795   _heuristics->record_bytes_start_CM(bytes);
 796 }
 797 
 798 void ShenandoahCollectorPolicy::record_bytes_end_CM(size_t bytes) {
 799   _heuristics->record_bytes_end_CM(bytes);
 800 }
 801 
 802 void ShenandoahCollectorPolicy::record_bytes_reclaimed(size_t bytes) {
 803   _heuristics->record_bytes_reclaimed(bytes);
 804 }
 805 
 806 void ShenandoahCollectorPolicy::record_user_requested_gc() {
 807   _user_requested_gcs++;
 808 }
 809 
 810 void ShenandoahCollectorPolicy::record_allocation_failure_gc() {
 811   _allocation_failure_gcs++;
 812 }
 813 
 814 bool ShenandoahCollectorPolicy::should_start_concurrent_mark(size_t used,
 815                                                              size_t capacity) {
 816   return _heuristics->should_start_concurrent_mark(used, capacity);
 817 }
 818 
 819 bool ShenandoahCollectorPolicy::handover_cancelled_marking() {
 820   return _heuristics->handover_cancelled_marking();
 821 }
 822 
 823 void ShenandoahCollectorPolicy::record_cm_success() {
 824   _heuristics->record_cm_success();
 825   _successful_cm++;
 826 }
 827 
 828 void ShenandoahCollectorPolicy::record_cm_degenerated() {
 829   _degenerated_cm++;
 830 }
 831 
 832 void ShenandoahCollectorPolicy::record_cm_cancelled() {
 833   _heuristics->record_cm_cancelled();
 834 }
 835 
 836 void ShenandoahCollectorPolicy::record_full_gc() {
 837   _heuristics->record_full_gc();
 838 }
 839 
 840 void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
 841   _heuristics->choose_collection_set(collection_set, connections);
 842 }
 843 
 844 void ShenandoahCollectorPolicy::choose_free_set(ShenandoahFreeSet* free_set) {
 845    _heuristics->choose_free_set(free_set);
 846 }
 847 
 848 
 849 bool ShenandoahCollectorPolicy::process_references() {
 850   return _heuristics->process_references();
 851 }
 852 
 853 bool ShenandoahCollectorPolicy::unload_classes() {
 854   return _heuristics->unload_classes();
 855 }
 856 
 857 void ShenandoahCollectorPolicy::print_tracing_info(outputStream* out) {
 858   out->cr();
 859   out->print_cr("GC STATISTICS:");
 860   out->print_cr("  \"gross\" pauses include time to safepoint. \"net\" pauses are times spent in GC.");
 861   out->print_cr("  \"a\" is average time for each phase, look at levels to see if average makes sense.");
 862   out->print_cr("  \"lvls\" are quantiles: 0%% (minimum), 25%%, 50%% (median), 75%%, 100%% (maximum).");
 863   out->cr();
 864 
 865   for (uint i = 0; i < _num_phases; i++) {
 866     if (_timing_data[i]._secs.maximum() != 0) {
 867       print_summary_sd(out, _phase_names[i], &(_timing_data[i]._secs));
 868     }
 869   }
 870 
 871   out->cr();
 872   out->print_cr("" SIZE_FORMAT " allocation failure and " SIZE_FORMAT " user requested GCs", _allocation_failure_gcs, _user_requested_gcs);
 873   out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated concurrent markings", _successful_cm, _degenerated_cm);
 874   out->cr();
 875 }
 876 
 877 void ShenandoahCollectorPolicy::print_summary_sd(outputStream* out, const char* str, const HdrSeq* seq)  {
 878   out->print_cr("%-27s = %8.2lf s (a = %8.0lf us) (n = "INT32_FORMAT_W(5)") (lvls, us = %8.0lf, %8.0lf, %8.0lf, %8.0lf, %8.0lf)",
 879           str,
 880           seq->sum(),
 881           seq->avg() * 1000000.0,
 882           seq->num(),
 883           seq->percentile(0)  * 1000000.0,
 884           seq->percentile(25) * 1000000.0,
 885           seq->percentile(50) * 1000000.0,
 886           seq->percentile(75) * 1000000.0,
 887           seq->maximum() * 1000000.0
 888   );
 889 }
 890 
 891 void ShenandoahCollectorPolicy::increase_cycle_counter() {
 892   _cycle_counter++;
 893 }
 894 
 895 size_t ShenandoahCollectorPolicy::cycle_counter() const {
 896   return _cycle_counter;
 897 }
 898 
 899  ShenandoahPhaseTimes* ShenandoahCollectorPolicy::phase_times() {
 900   return _phase_times;
 901 }
 902 
 903 
 904 uint ShenandoahCollectorPolicy::calc_workers_for_java_threads(uint application_workers) {
 905   return (uint)ShenandoahGCWorkerPerJavaThread * application_workers;
 906 }
 907 
 908 uint ShenandoahCollectorPolicy::calc_workers_for_live_set(size_t live_data) {
 909   return (uint)(live_data / HeapSizePerGCThread);
 910 }
 911 
 912 
 913 uint ShenandoahCollectorPolicy::calc_default_active_workers(
 914                                                      uint total_workers,
 915                                                      uint min_workers,
 916                                                      uint active_workers,
 917                                                      uint application_workers,
 918                                                      uint  workers_by_java_threads,
 919                                                      uint  workers_by_liveset) {
 920   // If the user has turned off using a dynamic number of GC threads
 921   // or the users has requested a specific number, set the active
 922   // number of workers to all the workers.
 923   uint new_active_workers = total_workers;
 924   uint prev_active_workers = active_workers;
 925   uint active_workers_by_JT = 0;
 926   uint active_workers_by_liveset = 0;
 927 
 928   active_workers_by_JT = MAX2(workers_by_java_threads, min_workers);
 929 
 930   // Choose a number of GC threads based on the live set.
 931   active_workers_by_liveset =
 932       MAX2((uint) 2U, workers_by_liveset);
 933 
 934   uint max_active_workers =
 935     MAX2(active_workers_by_JT, active_workers_by_liveset);
 936 
 937   new_active_workers = MIN2(max_active_workers, total_workers);
 938 
 939   // Increase GC workers instantly but decrease them more
 940   // slowly.
 941   if (new_active_workers < prev_active_workers) {
 942     new_active_workers =
 943       MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
 944   }
 945 
 946   if (UseNUMA) {
 947     uint numa_groups = (uint)os::numa_get_groups_num();
 948     assert(numa_groups <= total_workers, "Not enough workers to cover all numa groups");
 949     new_active_workers = MAX2(new_active_workers, numa_groups);
 950   }
 951 
 952   // Check once more that the number of workers is within the limits.
 953   assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
 954   assert(new_active_workers >= min_workers, "Minimum workers not observed");
 955   assert(new_active_workers <= total_workers, "Total workers not observed");
 956 
 957   log_trace(gc, task)("ShenandoahCollectorPolicy::calc_default_active_workers() : "
 958      "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
 959      "prev_active_workers: " UINTX_FORMAT "\n"
 960      " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_liveset: " UINTX_FORMAT,
 961      (uintx)active_workers, (uintx)new_active_workers, (uintx)prev_active_workers,
 962      (uintx)active_workers_by_JT, (uintx)active_workers_by_liveset);
 963   assert(new_active_workers > 0, "Always need at least 1");
 964   return new_active_workers;
 965 }
 966 
 967 /**
 968  * Initial marking phase also update references of live objects from previous concurrent GC cycle,
 969  * so we take Java threads and live set into account.
 970  */
 971 uint ShenandoahCollectorPolicy::calc_workers_for_init_marking(uint active_workers,
 972                                             uint application_workers) {
 973 
 974   if (!UseDynamicNumberOfGCThreads ||
 975      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
 976     assert(ParallelGCThreads > 0, "Always need at least 1");
 977     return ParallelGCThreads;
 978   } else {
 979     ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)ShenandoahHeap::heap()->collector_policy();
 980     size_t live_data = policy->_heuristics->bytes_in_cset();
 981 
 982     return calc_default_active_workers(ParallelGCThreads, (ParallelGCThreads > 1) ? 2 : 1,
 983       active_workers, application_workers,
 984       calc_workers_for_java_threads(application_workers),
 985       calc_workers_for_live_set(live_data));
 986   }
 987 }
 988 
 989 uint ShenandoahCollectorPolicy::calc_workers_for_conc_marking(uint active_workers,
 990                                             uint application_workers) {
 991 
 992   if (!UseDynamicNumberOfGCThreads ||
 993      (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
 994     assert(ConcGCThreads > 0, "Always need at least 1");
 995     return ConcGCThreads;
 996   } else {
 997     return calc_default_active_workers(ConcGCThreads,
 998       (ConcGCThreads > 1 ? 2 : 1), active_workers,
 999       application_workers, calc_workers_for_java_threads(application_workers), 0);
1000   }
1001 }
1002 
1003 uint ShenandoahCollectorPolicy::calc_workers_for_final_marking(uint active_workers,
1004                                             uint application_workers) {
1005 
1006   if (!UseDynamicNumberOfGCThreads ||
1007      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1008     assert(ParallelGCThreads > 0, "Always need at least 1");
1009     return ParallelGCThreads;
1010   } else {
1011     return calc_default_active_workers(ParallelGCThreads,
1012       (ParallelGCThreads > 1 ? 2 : 1), active_workers,
1013       application_workers, calc_workers_for_java_threads(application_workers), 0);
1014   }
1015 }
1016 
1017   // Calculate workers for concurrent evacuation (concurrent GC)
1018 uint ShenandoahCollectorPolicy::calc_workers_for_conc_evacuation(uint active_workers,
1019                                             uint application_workers) {
1020   if (!UseDynamicNumberOfGCThreads ||
1021      (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1022     assert(ConcGCThreads > 0, "Always need at least 1");
1023     return ConcGCThreads;
1024   } else {
1025     return calc_workers_for_evacuation(false, // not a full GC
1026       ConcGCThreads, active_workers, application_workers);
1027   }
1028 }
1029 
1030   // Calculate workers for parallel evaculation (full GC)
1031 uint ShenandoahCollectorPolicy::calc_workers_for_parallel_evacuation(uint active_workers,
1032                                             uint application_workers) {
1033   if (!UseDynamicNumberOfGCThreads ||
1034      (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
1035     assert(ParallelGCThreads > 0, "Always need at least 1");
1036     return ParallelGCThreads;
1037   } else {
1038     return calc_workers_for_evacuation(true, // a full GC
1039       ParallelGCThreads, active_workers, application_workers);
1040   }
1041 }
1042 
1043 
1044 uint ShenandoahCollectorPolicy::calc_workers_for_evacuation(bool full_gc,
1045                                             uint total_workers,
1046                                             uint active_workers,
1047                                             uint application_workers) {
1048 
1049   // Calculation based on live set
1050   size_t live_data = 0;
1051   ShenandoahHeap* heap = ShenandoahHeap::heap();
1052   if (full_gc) {
1053     ShenandoahHeapRegionSet* regions = heap->regions();
1054     for (size_t index = 0; index < regions->active_regions(); index ++) {
1055       live_data += regions->get_fast(index)->get_live_data_bytes();
1056     }
1057   } else {
1058     ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)heap->collector_policy();
1059     live_data = policy->_heuristics->bytes_in_cset();
1060   }
1061 
1062   uint active_workers_by_liveset = calc_workers_for_live_set(live_data);
1063   return calc_default_active_workers(total_workers,
1064       (total_workers > 1 ? 2 : 1), active_workers,
1065       application_workers, 0, active_workers_by_liveset);
1066 }
1067