/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimes.hpp"
#include "runtime/os.hpp"

class ShenandoahHeuristics : public CHeapObj<mtGC> {

  NumberSeq _allocation_rate_bytes;
  NumberSeq _reclamation_rate_bytes;

  size_t _bytes_allocated_since_CM;
  size_t _bytes_reclaimed_this_cycle;

protected:
  typedef struct {
    size_t region_number;
    size_t garbage;
  } RegionGarbage;

  static int compare_by_garbage(RegionGarbage a, RegionGarbage b) {
    if (a.garbage > b.garbage)
      return -1;
    else if (a.garbage < b.garbage)
      return 1;
    else return 0;
  }

  RegionGarbage* get_region_garbage_cache(size_t num) {
    RegionGarbage* res = _region_garbage;
    if (res == NULL) {
      res = NEW_C_HEAP_ARRAY(RegionGarbage, num, mtGC);
      // Remember the allocation, otherwise we would leak it and
      // re-allocate on every call.
      _region_garbage = res;
      _region_garbage_size = num;
    } else if (_region_garbage_size < num) {
      // Capture the result of the reallocation: the old pointer may
      // have been freed and must not be handed out to the caller.
      res = REALLOC_C_HEAP_ARRAY(RegionGarbage, _region_garbage, num, mtGC);
      _region_garbage = res;
      _region_garbage_size = num;
    }
    return res;
  }
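
  // Note: the array handed out above is only a scratch buffer. Its capacity is
  // kept between calls (and only released in the destructor), but its contents
  // are not meaningful across calls.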

  RegionGarbage* _region_garbage;
  size_t _region_garbage_size;

  size_t _bytes_allocated_start_CM;
  size_t _bytes_allocated_during_CM;

  size_t _bytes_allocated_after_last_gc;

  uint _cancelled_cm_cycles_in_a_row;
  uint _successful_cm_cycles_in_a_row;

  uint _cancelled_uprefs_cycles_in_a_row;
  uint _successful_uprefs_cycles_in_a_row;

  size_t _bytes_in_cset;

public:

  ShenandoahHeuristics();
  ~ShenandoahHeuristics();

  void record_bytes_allocated(size_t bytes);
  void record_bytes_reclaimed(size_t bytes);
  void record_bytes_start_CM(size_t bytes);
  void record_bytes_end_CM(size_t bytes);

  void record_gc_start() {
    // Do nothing.
  }

  void record_gc_end() {
    _bytes_allocated_after_last_gc = ShenandoahHeap::heap()->used();
  }

  size_t bytes_in_cset() const { return _bytes_in_cset; }

  virtual void print_thresholds() {
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const = 0;

  virtual bool update_refs_early() const {
    return ShenandoahUpdateRefsEarly;
  }

  virtual bool should_start_partial_gc() {
    return false;
  }

  virtual bool handover_cancelled_marking() {
    return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }

  virtual bool handover_cancelled_uprefs() {
    return _cancelled_uprefs_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }

  virtual void record_cm_cancelled() {
    _cancelled_cm_cycles_in_a_row++;
    _successful_cm_cycles_in_a_row = 0;
  }

  virtual void record_cm_success() {
    _cancelled_cm_cycles_in_a_row = 0;
    _successful_cm_cycles_in_a_row++;
  }

  virtual void record_uprefs_cancelled() {
    _cancelled_uprefs_cycles_in_a_row++;
    _successful_uprefs_cycles_in_a_row = 0;
  }

  virtual void record_uprefs_success() {
    _cancelled_uprefs_cycles_in_a_row = 0;
    _successful_uprefs_cycles_in_a_row++;
  }

  virtual void record_full_gc() {
    _bytes_in_cset = 0;
  }

  virtual void record_peak_occupancy() {
  }

  virtual void start_choose_collection_set() {
  }

  virtual void end_choose_collection_set() {
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) = 0;

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections = NULL);
  virtual void choose_free_set(ShenandoahFreeSet* free_set);

  virtual bool process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Process references every Nth GC cycle.
    return cycle % ShenandoahRefProcFrequency == 0;
  }

  virtual bool unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Unload classes every Nth GC cycle.
    // This should not happen in the same cycle as process_references to amortize costs.
    // Offsetting by one is enough to break the rendezvous when periods are equal.
    // When periods are not equal, offsetting by one is just as good as any other guess.
    return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
  }
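
  // Illustrative example (flag values made up): with ShenandoahRefProcFrequency=5
  // and ShenandoahUnloadClassesFrequency=5, references are processed on cycles
  // 0, 5, 10, ... while classes unload on cycles 4, 9, 14, ..., so the two
  // never land on the same cycle.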

  virtual bool needs_regions_sorted_by_garbage() {
    // Most of them do not.
    return false;
  }
};

// Initializers are listed in member declaration order to match actual
// initialization order (and keep -Wreorder quiet).
ShenandoahHeuristics::ShenandoahHeuristics() :
  _bytes_allocated_since_CM(0),
  _bytes_reclaimed_this_cycle(0),
  _region_garbage(NULL),
  _region_garbage_size(0),
  _bytes_allocated_start_CM(0),
  _bytes_allocated_during_CM(0),
  _bytes_allocated_after_last_gc(0),
  _cancelled_cm_cycles_in_a_row(0),
  _successful_cm_cycles_in_a_row(0),
  _cancelled_uprefs_cycles_in_a_row(0),
  _successful_uprefs_cycles_in_a_row(0),
  _bytes_in_cset(0)
{
}

ShenandoahHeuristics::~ShenandoahHeuristics() {
  if (_region_garbage != NULL) {
    FREE_C_HEAP_ARRAY(RegionGarbage, _region_garbage);
  }
}

void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
  start_choose_collection_set();

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Poll this before populating collection set.
  size_t total_garbage = heap->garbage();

  // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.

  ShenandoahHeapRegionSet* regions = heap->regions();
  size_t active = regions->active_regions();

  RegionGarbage* candidates = get_region_garbage_cache(active);

  size_t cand_idx = 0;
  _bytes_in_cset = 0;

  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  for (size_t i = 0; i < active; i++) {
    ShenandoahHeapRegion* region = regions->get(i);

    if (! region->is_humongous() && ! region->is_pinned()) {
      if ((! region->is_empty()) && ! region->has_live()) {
        // We can recycle it right away and put it in the free set.
        immediate_regions++;
        immediate_garbage += region->garbage();
        heap->decrease_used(region->used());
        region->recycle();
        log_develop_trace(gc)("Choose region " SIZE_FORMAT " for immediate reclaim with garbage = " SIZE_FORMAT
                              " and live = " SIZE_FORMAT "\n",
                              region->region_number(), region->garbage(), region->get_live_data_bytes());
      } else {
        // This is our candidate for later consideration.
        candidates[cand_idx].region_number = region->region_number();
        candidates[cand_idx].garbage = region->garbage();
        cand_idx++;
      }
    } else {
      assert(region->has_live() || region->is_empty() || region->is_pinned() || region->is_humongous(), "check rejected");
      log_develop_trace(gc)("Rejected region " SIZE_FORMAT " with garbage = " SIZE_FORMAT
                            " and live = " SIZE_FORMAT "\n",
                            region->region_number(), region->garbage(), region->get_live_data_bytes());
    }
  }

  // Step 2. Process the remaining candidates, if any.

  if (cand_idx > 0) {
    if (needs_regions_sorted_by_garbage()) {
      QuickSort::sort<RegionGarbage>(candidates, (int)cand_idx, compare_by_garbage, false);
    }

    for (size_t i = 0; i < cand_idx; i++) {
      ShenandoahHeapRegion* region = regions->get(candidates[i].region_number);
      if (region_in_collection_set(region, immediate_garbage)) {
        log_develop_trace(gc)("Choose region " SIZE_FORMAT " with garbage = " SIZE_FORMAT
                              " and live = " SIZE_FORMAT "\n",
                              region->region_number(), region->garbage(), region->get_live_data_bytes());
        collection_set->add_region(region);
        region->set_in_collection_set(true);
        _bytes_in_cset += region->used();
      }
    }
  }

  end_choose_collection_set();

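  // The resulting ergonomics log might look like this (illustrative numbers only):
  //   Total Garbage: 512M
  //   Immediate Garbage: 128M, 64 regions
  //   Garbage to be collected: 256M (50% of total), 40 regions
  //   Live objects to be evacuated: 24M
  //   Live/garbage ratio in collected regions: 9%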
  log_info(gc, ergo)("Total Garbage: " SIZE_FORMAT "M",
                     total_garbage / M);
  log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT " regions",
                     immediate_garbage / M, immediate_regions);
  log_info(gc, ergo)("Garbage to be collected: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions",
                     collection_set->garbage() / M, collection_set->garbage() * 100 / MAX2(total_garbage, (size_t)1), collection_set->count());
  log_info(gc, ergo)("Live objects to be evacuated: " SIZE_FORMAT "M",
                     collection_set->live_data() / M);
  log_info(gc, ergo)("Live/garbage ratio in collected regions: " SIZE_FORMAT "%%",
                     collection_set->live_data() * 100 / MAX2(collection_set->garbage(), (size_t)1));
}

void ShenandoahHeuristics::choose_free_set(ShenandoahFreeSet* free_set) {

  ShenandoahHeapRegionSet* ordered_regions = ShenandoahHeap::heap()->regions();
  size_t i = 0;
  size_t end = ordered_regions->active_regions();

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  while (i < end) {
    ShenandoahHeapRegion* region = ordered_regions->get(i++);
    if ((! heap->in_collection_set(region))
        && (! region->is_humongous())
        && (! region->is_pinned())) {
      free_set->add_region(region);
    }
  }
}

void ShenandoahCollectorPolicy::record_workers_start(TimingPhase phase) {
  for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
    _phase_times->reset(i);
  }
}

void ShenandoahCollectorPolicy::record_workers_end(TimingPhase phase) {
  guarantee(phase == init_evac ||
            phase == scan_roots ||
            phase == update_roots ||
            phase == partial_gc_work ||
            phase == final_update_refs_roots ||
            phase == _num_phases,
            "only in these phases can we add per-thread phase times");
  if (phase != _num_phases) {
    // Merge the per-thread phase times into the counters below the given phase.
    for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
      double t = _phase_times->average(i);
      _timing_data[phase + i + 1]._secs.add(t);
    }
  }
}

void ShenandoahCollectorPolicy::record_phase_start(TimingPhase phase) {
  _timing_data[phase]._start = os::elapsedTime();
}

void ShenandoahCollectorPolicy::record_phase_end(TimingPhase phase) {
  double end = os::elapsedTime();
  double elapsed = end - _timing_data[phase]._start;
  _timing_data[phase]._secs.add(elapsed);
}

void ShenandoahCollectorPolicy::record_gc_start() {
  _heuristics->record_gc_start();
}

void ShenandoahCollectorPolicy::record_gc_end() {
  _heuristics->record_gc_end();
}

void ShenandoahCollectorPolicy::report_concgc_cancelled() {
}

void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
  _bytes_allocated_since_CM = bytes;
  _bytes_allocated_start_CM = bytes;
  _allocation_rate_bytes.add(bytes);
}

void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
  _bytes_reclaimed_this_cycle = bytes;
  _reclamation_rate_bytes.add(bytes);
}

void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
  _bytes_allocated_start_CM = bytes;
}

void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
  _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)
                                                                   : bytes;
}

class PassiveHeuristics : public ShenandoahHeuristics {
public:
  PassiveHeuristics() : ShenandoahHeuristics() {
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    return r->garbage() > 0;
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    // Never do concurrent GCs.
    return false;
  }

  virtual bool process_references() {
    // Randomly process refs with 50% chance.
    return (os::random() & 1) == 1;
  }

  virtual bool unload_classes() {
    // Randomly unload classes with 50% chance.
    return (os::random() & 1) == 1;
  }
};

class AggressiveHeuristics : public ShenandoahHeuristics {
public:
  AggressiveHeuristics() : ShenandoahHeuristics() {
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    return r->garbage() > 0;
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    return true;
  }

  virtual bool process_references() {
    // Randomly process refs with 50% chance.
    return (os::random() & 1) == 1;
  }

  virtual bool unload_classes() {
    // Randomly unload classes with 50% chance.
    return (os::random() & 1) == 1;
  }
};

class DynamicHeuristics : public ShenandoahHeuristics {
public:
  DynamicHeuristics() : ShenandoahHeuristics() {
  }

  void print_thresholds() {
    log_info(gc, init)("Shenandoah heuristics thresholds: allocation " SIZE_FORMAT ", free " SIZE_FORMAT ", garbage " SIZE_FORMAT,
                       ShenandoahAllocationThreshold,
                       ShenandoahFreeThreshold,
                       ShenandoahGarbageThreshold);
  }

  virtual ~DynamicHeuristics() {}

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {

    bool shouldStartConcurrentMark = false;

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t free_capacity = heap->free_regions()->capacity();
    size_t free_used = heap->free_regions()->used();
    assert(free_used <= free_capacity, "must use less than capacity");
    size_t available = free_capacity - free_used;

    if (!update_refs_early()) {
      // Count in the memory available after cset reclamation.
      size_t cset = MIN2(_bytes_in_cset, (ShenandoahCSetThreshold * capacity) / 100);
      available += cset;
    }

    uintx threshold = ShenandoahFreeThreshold + ShenandoahCSetThreshold;
    size_t targetStartMarking = (capacity * threshold) / 100;
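
    // Worked example (illustrative values, not necessarily the defaults):
    // with capacity = 1024M, ShenandoahFreeThreshold = 10 and
    // ShenandoahCSetThreshold = 40, threshold = 50 and
    // targetStartMarking = 1024M * 50 / 100 = 512M. Marking then starts only
    // if available memory is below 512M *and* allocations since the last
    // concurrent mark exceed ShenandoahAllocationThreshold percent of capacity.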

    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
    if (available < targetStartMarking &&
        heap->bytes_allocated_since_cm() > threshold_bytes_allocated)
    {
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      shouldStartConcurrentMark = true;
    }

    return shouldStartConcurrentMark;
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
    return r->garbage() > threshold;
  }
};

class AdaptiveHeuristics : public ShenandoahHeuristics {
private:
  uintx _free_threshold;
  TruncatedSeq* _cset_history;
  size_t _peak_occupancy;
public:
  AdaptiveHeuristics() :
    ShenandoahHeuristics(),
    _free_threshold(ShenandoahInitFreeThreshold),
    _cset_history(new TruncatedSeq((uint)ShenandoahHappyCyclesThreshold)),
    _peak_occupancy(0) {

    _cset_history->add((double) ShenandoahCSetThreshold);
    _cset_history->add((double) ShenandoahCSetThreshold);
  }

  virtual ~AdaptiveHeuristics() {
    delete _cset_history;
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
    return r->garbage() > threshold;
  }

  void handle_gc_success(uint successful_cycles) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t capacity = heap->capacity();
    size_t available = heap->capacity() - _peak_occupancy;
    if (available * 100 / capacity <= ShenandoahMinFreeThreshold) {
      pessimize_free_threshold();
    } else {
      optimize_free_threshold(successful_cycles);
    }
    _peak_occupancy = 0;
  }

  void optimize_free_threshold(uint successful_cycles) {
    if (successful_cycles > ShenandoahHappyCyclesThreshold &&
        _free_threshold > 0) {
      _free_threshold--;
      log_info(gc, ergo)("Reducing free threshold to: " UINTX_FORMAT, _free_threshold);
      _successful_cm_cycles_in_a_row = 0;
    }
  }

  void pessimize_free_threshold() {
    if (_free_threshold < ShenandoahMaxFreeThreshold) {
      _free_threshold++;
      log_info(gc, ergo)("Increasing free threshold to: " UINTX_FORMAT, _free_threshold);
    }
  }
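
  // Taken together, optimize/pessimize form a simple feedback loop on the
  // free threshold: a long enough run of successful cycles lowers it by one
  // percentage point (down to 0), while any cancellation, full GC or
  // low-memory cycle raises it by one (up to ShenandoahMaxFreeThreshold),
  // which makes the heuristics start the next GC earlier.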

  virtual void record_cm_cancelled() {
    ShenandoahHeuristics::record_cm_cancelled();
    pessimize_free_threshold();
  }

  virtual void record_cm_success() {
    ShenandoahHeuristics::record_cm_success();
    if (! update_refs_early()) {
      handle_gc_success(_successful_cm_cycles_in_a_row);
    }
  }

  virtual void record_uprefs_cancelled() {
    ShenandoahHeuristics::record_uprefs_cancelled();
    pessimize_free_threshold();
  }

  virtual void record_uprefs_success() {
    ShenandoahHeuristics::record_uprefs_success();
    handle_gc_success(_successful_uprefs_cycles_in_a_row);
  }

  virtual void record_full_gc() {
    ShenandoahHeuristics::record_full_gc();
    pessimize_free_threshold();
  }

  virtual void record_peak_occupancy() {
    _peak_occupancy = MAX2(_peak_occupancy, ShenandoahHeap::heap()->used());
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    bool shouldStartConcurrentMark = false;

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t free_capacity = heap->free_regions()->capacity();
    size_t free_used = heap->free_regions()->used();
    assert(free_used <= free_capacity, "must use less than capacity");
    size_t available = free_capacity - free_used;
    uintx factor = _free_threshold;
    size_t cset_threshold = 0;
    if (!update_refs_early()) {
      // Count in the memory available after cset reclamation.
      cset_threshold = (size_t) _cset_history->davg();
      size_t cset = MIN2(_bytes_in_cset, (cset_threshold * capacity) / 100);
      available += cset;
      factor += cset_threshold;
    }

    size_t targetStartMarking = (capacity * factor) / 100;

    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
    if (available < targetStartMarking &&
        heap->bytes_allocated_since_cm() > threshold_bytes_allocated)
    {
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      shouldStartConcurrentMark = true;
    }

    if (shouldStartConcurrentMark) {
      if (! update_refs_early()) {
        log_info(gc, ergo)("Predicted cset threshold: " SIZE_FORMAT, cset_threshold);
        log_info(gc, ergo)("Starting concurrent mark at " SIZE_FORMAT "K CSet (" SIZE_FORMAT "%%)", _bytes_in_cset / K, _bytes_in_cset * 100 / capacity);
        _cset_history->add((double) (_bytes_in_cset * 100 / capacity));
      }
    }
    return shouldStartConcurrentMark;
  }
};

class GlobalHeuristics : public DynamicHeuristics {
private:
  size_t _garbage;
  size_t _min_garbage;
public:
  GlobalHeuristics() : DynamicHeuristics() {
    if (FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) {
      FLAG_SET_DEFAULT(ShenandoahGarbageThreshold, 90);
    }
  }
  virtual ~GlobalHeuristics() {}

  virtual void start_choose_collection_set() {
    _garbage = 0;
    size_t heap_garbage = ShenandoahHeap::heap()->garbage();
    _min_garbage = heap_garbage * ShenandoahGarbageThreshold / 100;
  }
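
  // Accept regions (pre-sorted by garbage, see needs_regions_sorted_by_garbage)
  // until the accumulated garbage, including immediately reclaimed regions,
  // reaches _min_garbage. Illustrative numbers: with 1000M of total heap
  // garbage and ShenandoahGarbageThreshold = 90, regions are taken until they
  // cover 900M of garbage.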

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    if (_garbage + immediate_garbage < _min_garbage && ! r->is_empty()) {
      _garbage += r->garbage();
      return true;
    } else {
      return false;
    }
  }

  virtual bool needs_regions_sorted_by_garbage() {
    return true;
  }
};

class RatioHeuristics : public DynamicHeuristics {
private:
  size_t _garbage;
  size_t _live;
public:
  RatioHeuristics() : DynamicHeuristics() {
    if (FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) {
      FLAG_SET_DEFAULT(ShenandoahGarbageThreshold, 95);
    }
  }
  virtual ~RatioHeuristics() {}

  virtual void start_choose_collection_set() {
    _garbage = 0;
    _live = 0;
  }

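  // Accept regions while the running live/garbage ratio stays below
  // 100 - ShenandoahGarbageThreshold. Illustrative numbers: with the default
  // of 95 established in the constructor above, regions are taken as long as
  // the accumulated live data stays under 5% of the accumulated garbage
  // (plus any immediately reclaimed garbage).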
  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    size_t min_ratio = 100 - ShenandoahGarbageThreshold;
    if (_live * 100 / MAX2(_garbage + immediate_garbage, (size_t)1) < min_ratio && ! r->is_empty()) {
      _garbage += r->garbage();
      _live += r->get_live_data_bytes();
      return true;
    } else {
      return false;
    }
  }

  virtual bool needs_regions_sorted_by_garbage() {
    return true;
  }
};

class ConnectionHeuristics : public ShenandoahHeuristics {
private:
  size_t _max_live_data;
  double _used_threshold_factor;
  double _garbage_threshold_factor;
  double _allocation_threshold_factor;

  uintx _used_threshold;
  uintx _garbage_threshold;
  uintx _allocation_threshold;

public:
  ConnectionHeuristics() : ShenandoahHeuristics() {
    _max_live_data = 0;

    _used_threshold = 0;
    _garbage_threshold = 0;
    _allocation_threshold = 0;

    _used_threshold_factor = 0.;
    _garbage_threshold_factor = 0.1;
    _allocation_threshold_factor = 0.;
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    // Renamed from the misleading "half_gig": the value is 64 MB.
    size_t sixty_four_mb = 64 * 1024 * 1024;
    size_t bytes_alloc = ShenandoahHeap::heap()->bytes_allocated_since_cm();
    bool result = bytes_alloc > sixty_four_mb;
    if (result) tty->print("Starting a concurrent mark");
    return result;
  }

  bool maybe_add_heap_region(ShenandoahHeapRegion* hr, ShenandoahCollectionSet* collection_set) {
    if (!hr->is_humongous() && hr->has_live() && !collection_set->contains(hr)) {
      collection_set->add_region_check_for_duplicates(hr);
      hr->set_in_collection_set(true);
      return true;
    }
    return false;
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
    ShenandoahHeapRegionSet* regions = ShenandoahHeap::heap()->regions();
    size_t active = regions->active_regions();

    RegionGarbage* sorted_by_garbage = get_region_garbage_cache(active);
    for (size_t i = 0; i < active; i++) {
      ShenandoahHeapRegion* r = regions->get(i);
      sorted_by_garbage[i].region_number = r->region_number();
      sorted_by_garbage[i].garbage = r->garbage();
    }

    QuickSort::sort<RegionGarbage>(sorted_by_garbage, (int) active, compare_by_garbage, false);

    size_t num = ShenandoahHeap::heap()->num_regions();
    // Simulate write heuristics by picking the best region.
    int r = 0;
    ShenandoahHeapRegion* chosen_one = regions->get(sorted_by_garbage[0].region_number);

    while (! maybe_add_heap_region(chosen_one, collection_set)) {
      chosen_one = regions->get(sorted_by_garbage[++r].region_number);
    }

    size_t region_number = chosen_one->region_number();
    log_develop_trace(gc)("Adding chosen region " SIZE_FORMAT, region_number);

    // Add all the regions which point to this region.
    for (size_t i = 0; i < num; i++) {
      if (connections[i * num + region_number] > 0) {
        ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
        if (maybe_add_heap_region(candidate, collection_set)) {
          log_develop_trace(gc)("Adding region " SIZE_FORMAT " which points to the chosen region", i);
        }
      }
    }

    // Add all the regions they point to.
    for (size_t ci = 0; ci < collection_set->active_regions(); ci++) {
      ShenandoahHeapRegion* cs_heap_region = collection_set->get(ci);
      size_t cs_heap_region_number = cs_heap_region->region_number();
      for (size_t i = 0; i < num; i++) {
        if (connections[i * num + cs_heap_region_number] > 0) {
          ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
          if (maybe_add_heap_region(candidate, collection_set)) {
            log_develop_trace(gc)
              ("Adding region " SIZE_FORMAT " which is pointed to by region " SIZE_FORMAT, i, cs_heap_region_number);
          }
        }
      }
    }
    _max_live_data = MAX2(_max_live_data, collection_set->live_data());
    collection_set->print();
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    assert(false, "Shouldn't get here");
    return false;
  }
};

class PartialHeuristics : public AdaptiveHeuristics {
public:
  PartialHeuristics() : AdaptiveHeuristics() {
    if (FLAG_IS_DEFAULT(ShenandoahAllocationThreshold)) {
      FLAG_SET_DEFAULT(ShenandoahAllocationThreshold, 5);
    }
    FLAG_SET_DEFAULT(UseShenandoahMatrix, true);
    // TODO: Disable this optimization for now, as it also requires the matrix barriers.
    FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
  }

  virtual ~PartialHeuristics() {}

  bool update_refs_early() const {
    return true;
  }

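  // Start a partial GC once allocations since the last GC exceed
  // ShenandoahAllocationThreshold percent of capacity. Illustrative numbers:
  // with a 1024M heap and the default of 5 established in the constructor
  // above, a partial cycle triggers after roughly 51M of new allocation.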
  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t capacity = heap->capacity();

    size_t used = heap->used();
    return (used - _bytes_allocated_after_last_gc) * 100 / capacity > ShenandoahAllocationThreshold;
  }
};

ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
  _cycle_counter(0),
  _successful_cm(0),
  _degenerated_cm(0),
  _successful_uprefs(0),
  _degenerated_uprefs(0)
{
  ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());

  initialize_all();

  _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
  _stw_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
  _conc_timer = new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer();
  _user_requested_gcs = 0;
  _allocation_failure_gcs = 0;
  _conc_gc_aborted = false;

  _phase_names[total_pause]                     = "Total Pauses (N)";
  _phase_names[total_pause_gross]               = "Total Pauses (G)";
  _phase_names[init_mark]                       = "Initial Mark Pauses (N)";
  _phase_names[init_mark_gross]                 = "Initial Mark Pauses (G)";
  _phase_names[final_mark]                      = "Final Mark Pauses (N)";
  _phase_names[final_mark_gross]                = "Final Mark Pauses (G)";
  _phase_names[accumulate_stats]                = "  Accumulate Stats";
  _phase_names[make_parsable]                   = "  Make Parsable";
  _phase_names[clear_liveness]                  = "  Clear Liveness";
  _phase_names[finish_queues]                   = "  Finish Queues";
  _phase_names[weakrefs]                        = "  Weak References";
  _phase_names[class_unloading]                 = "  Class Unloading";
  _phase_names[prepare_evac]                    = "  Prepare Evacuation";

  _phase_names[scan_roots]                      = "  Scan Roots";
  _phase_names[scan_thread_roots]               = "    S: Thread Roots";
  _phase_names[scan_code_roots]                 = "    S: Code Cache Roots";
  _phase_names[scan_string_table_roots]         = "    S: String Table Roots";
  _phase_names[scan_universe_roots]             = "    S: Universe Roots";
  _phase_names[scan_jni_roots]                  = "    S: JNI Roots";
  _phase_names[scan_jni_weak_roots]             = "    S: JNI Weak Roots";
  _phase_names[scan_synchronizer_roots]         = "    S: Synchronizer Roots";
  _phase_names[scan_flat_profiler_roots]        = "    S: Flat Profiler Roots";
  _phase_names[scan_management_roots]           = "    S: Management Roots";
  _phase_names[scan_system_dictionary_roots]    = "    S: System Dict Roots";
  _phase_names[scan_cldg_roots]                 = "    S: CLDG Roots";
  _phase_names[scan_jvmti_roots]                = "    S: JVMTI Roots";

  _phase_names[update_roots]                    = "  Update Roots";
  _phase_names[update_thread_roots]             = "    U: Thread Roots";
  _phase_names[update_code_roots]               = "    U: Code Cache Roots";
  _phase_names[update_string_table_roots]       = "    U: String Table Roots";
  _phase_names[update_universe_roots]           = "    U: Universe Roots";
  _phase_names[update_jni_roots]                = "    U: JNI Roots";
  _phase_names[update_jni_weak_roots]           = "    U: JNI Weak Roots";
  _phase_names[update_synchronizer_roots]       = "    U: Synchronizer Roots";
  _phase_names[update_flat_profiler_roots]      = "    U: Flat Profiler Roots";
  _phase_names[update_management_roots]         = "    U: Management Roots";
  _phase_names[update_system_dictionary_roots]  = "    U: System Dict Roots";
  _phase_names[update_cldg_roots]               = "    U: CLDG Roots";
  _phase_names[update_jvmti_roots]              = "    U: JVMTI Roots";

  _phase_names[init_evac]                       = "  Initial Evacuation";
  _phase_names[evac_thread_roots]               = "    E: Thread Roots";
  _phase_names[evac_code_roots]                 = "    E: Code Cache Roots";
  _phase_names[evac_string_table_roots]         = "    E: String Table Roots";
  _phase_names[evac_universe_roots]             = "    E: Universe Roots";
  _phase_names[evac_jni_roots]                  = "    E: JNI Roots";
  _phase_names[evac_jni_weak_roots]             = "    E: JNI Weak Roots";
  _phase_names[evac_synchronizer_roots]         = "    E: Synchronizer Roots";
  _phase_names[evac_flat_profiler_roots]        = "    E: Flat Profiler Roots";
  _phase_names[evac_management_roots]           = "    E: Management Roots";
  _phase_names[evac_system_dictionary_roots]    = "    E: System Dict Roots";
  _phase_names[evac_cldg_roots]                 = "    E: CLDG Roots";
  _phase_names[evac_jvmti_roots]                = "    E: JVMTI Roots";

  _phase_names[recycle_regions]                 = "  Recycle Regions";
  _phase_names[reset_bitmaps]                   = "Reset Bitmaps";
  _phase_names[resize_tlabs]                    = "Resize TLABs";

  _phase_names[full_gc]                         = "Full GC";
  _phase_names[full_gc_heapdumps]               = "  Heap Dumps";
  _phase_names[full_gc_prepare]                 = "  Prepare";
  _phase_names[full_gc_mark]                    = "  Mark";
  _phase_names[full_gc_mark_finish_queues]      = "    Finish Queues";
  _phase_names[full_gc_mark_weakrefs]           = "    Weak References";
  _phase_names[full_gc_mark_class_unloading]    = "    Class Unloading";
  _phase_names[full_gc_calculate_addresses]     = "  Calculate Addresses";
  _phase_names[full_gc_adjust_pointers]         = "  Adjust Pointers";
  _phase_names[full_gc_copy_objects]            = "  Copy Objects";

  _phase_names[partial_gc_gross]                = "Pause Partial GC (G)";
  _phase_names[partial_gc]                      = "Pause Partial GC (N)";
  _phase_names[partial_gc_prepare]              = "  Prepare";
  _phase_names[partial_gc_work]                 = "  Work";
  _phase_names[partial_gc_thread_roots]         = "    P: Thread Roots";
  _phase_names[partial_gc_code_roots]           = "    P: Code Cache Roots";
  _phase_names[partial_gc_string_table_roots]   = "    P: String Table Roots";
  _phase_names[partial_gc_universe_roots]       = "    P: Universe Roots";
  _phase_names[partial_gc_jni_roots]            = "    P: JNI Roots";
  _phase_names[partial_gc_jni_weak_roots]       = "    P: JNI Weak Roots";
  _phase_names[partial_gc_synchronizer_roots]   = "    P: Synchronizer Roots";
  _phase_names[partial_gc_flat_profiler_roots]  = "    P: Flat Profiler Roots";
  _phase_names[partial_gc_management_roots]     = "    P: Management Roots";
  _phase_names[partial_gc_system_dict_roots]    = "    P: System Dict Roots";
  _phase_names[partial_gc_cldg_roots]           = "    P: CLDG Roots";
  _phase_names[partial_gc_jvmti_roots]          = "    P: JVMTI Roots";
  _phase_names[partial_gc_recycle]              = "  Recycle";

  _phase_names[conc_mark]                       = "Concurrent Marking";
  _phase_names[conc_evac]                       = "Concurrent Evacuation";

  _phase_names[init_update_refs_gross]          = "Pause Init  Update Refs (G)";
  _phase_names[init_update_refs]                = "Pause Init  Update Refs (N)";
  _phase_names[conc_update_refs]                = "Concurrent Update Refs";
  _phase_names[final_update_refs_gross]         = "Pause Final Update Refs (G)";
  _phase_names[final_update_refs]               = "Pause Final Update Refs (N)";

  _phase_names[final_update_refs_roots]                = "  Update Roots";
  _phase_names[final_update_refs_thread_roots]         = "    UR: Thread Roots";
  _phase_names[final_update_refs_code_roots]           = "    UR: Code Cache Roots";
  _phase_names[final_update_refs_string_table_roots]   = "    UR: String Table Roots";
  _phase_names[final_update_refs_universe_roots]       = "    UR: Universe Roots";
  _phase_names[final_update_refs_jni_roots]            = "    UR: JNI Roots";
  _phase_names[final_update_refs_jni_weak_roots]       = "    UR: JNI Weak Roots";
  _phase_names[final_update_refs_synchronizer_roots]   = "    UR: Synchronizer Roots";
  _phase_names[final_update_refs_flat_profiler_roots]  = "    UR: Flat Profiler Roots";
  _phase_names[final_update_refs_management_roots]     = "    UR: Management Roots";
  _phase_names[final_update_refs_system_dict_roots]    = "    UR: System Dict Roots";
  _phase_names[final_update_refs_cldg_roots]           = "    UR: CLDG Roots";
  _phase_names[final_update_refs_jvmti_roots]          = "    UR: JVMTI Roots";

  if (ShenandoahGCHeuristics != NULL) {
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      log_info(gc, init)("Shenandoah heuristics: aggressive");
      _heuristics = new AggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
      log_info(gc, init)("Shenandoah heuristics: dynamic");
      _heuristics = new DynamicHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "global") == 0) {
      log_info(gc, init)("Shenandoah heuristics: global");
      _heuristics = new GlobalHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "ratio") == 0) {
      log_info(gc, init)("Shenandoah heuristics: ratio");
      _heuristics = new RatioHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      log_info(gc, init)("Shenandoah heuristics: adaptive");
      _heuristics = new AdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      log_info(gc, init)("Shenandoah heuristics: passive");
      _heuristics = new PassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "connections") == 0) {
      log_info(gc, init)("Shenandoah heuristics: connections");
      _heuristics = new ConnectionHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "partial") == 0) {
      log_info(gc, init)("Shenandoah heuristics: partial GC");
      _heuristics = new PartialHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }
    _heuristics->print_thresholds();
  } else {
    ShouldNotReachHere();
  }
  _phase_times = new ShenandoahPhaseTimes(MAX2(ConcGCThreads, ParallelGCThreads));
}
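
// Illustrative usage (assuming the usual -XX flag spelling of these builds):
//   java -XX:+UseShenandoahGC -XX:ShenandoahGCHeuristics=adaptive ...
// takes the "adaptive" branch above and instantiates AdaptiveHeuristics.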

ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
  return this;
}

BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
  return BarrierSet::ShenandoahBarrierSet;
}

HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
                                                       bool is_tlab,
                                                       bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

void ShenandoahCollectorPolicy::initialize_alignments() {
  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
  _space_alignment = ShenandoahHeapRegion::region_size_bytes();
  _heap_alignment = ShenandoahHeapRegion::region_size_bytes();
}

void ShenandoahCollectorPolicy::post_heap_initialize() {
  // Nothing to do here (yet).
}

void ShenandoahCollectorPolicy::record_bytes_allocated(size_t bytes) {
  _heuristics->record_bytes_allocated(bytes);
}

void ShenandoahCollectorPolicy::record_bytes_start_CM(size_t bytes) {
  _heuristics->record_bytes_start_CM(bytes);
}

void ShenandoahCollectorPolicy::record_bytes_end_CM(size_t bytes) {
  _heuristics->record_bytes_end_CM(bytes);
}

void ShenandoahCollectorPolicy::record_bytes_reclaimed(size_t bytes) {
  _heuristics->record_bytes_reclaimed(bytes);
}

void ShenandoahCollectorPolicy::record_user_requested_gc() {
  _user_requested_gcs++;
}

void ShenandoahCollectorPolicy::record_allocation_failure_gc() {
  _allocation_failure_gcs++;
}

bool ShenandoahCollectorPolicy::should_start_concurrent_mark(size_t used,
                                                             size_t capacity) {
  return _heuristics->should_start_concurrent_mark(used, capacity);
}

bool ShenandoahCollectorPolicy::handover_cancelled_marking() {
  return _heuristics->handover_cancelled_marking();
}

bool ShenandoahCollectorPolicy::handover_cancelled_uprefs() {
  return _heuristics->handover_cancelled_uprefs();
}

bool ShenandoahCollectorPolicy::update_refs_early() {
  return _heuristics->update_refs_early();
}

void ShenandoahCollectorPolicy::record_cm_success() {
  _heuristics->record_cm_success();
  _successful_cm++;
}

void ShenandoahCollectorPolicy::record_cm_degenerated() {
  _degenerated_cm++;
}

void ShenandoahCollectorPolicy::record_cm_cancelled() {
  _heuristics->record_cm_cancelled();
}

void ShenandoahCollectorPolicy::record_uprefs_success() {
  _heuristics->record_uprefs_success();
  _successful_uprefs++;
}

void ShenandoahCollectorPolicy::record_uprefs_degenerated() {
  _degenerated_uprefs++;
}

void ShenandoahCollectorPolicy::record_uprefs_cancelled() {
  _heuristics->record_uprefs_cancelled();
}

void ShenandoahCollectorPolicy::record_full_gc() {
  _heuristics->record_full_gc();
}

void ShenandoahCollectorPolicy::record_peak_occupancy() {
  _heuristics->record_peak_occupancy();
}

void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
  _heuristics->choose_collection_set(collection_set, connections);
}

void ShenandoahCollectorPolicy::choose_free_set(ShenandoahFreeSet* free_set) {
  _heuristics->choose_free_set(free_set);
}

bool ShenandoahCollectorPolicy::process_references() {
  return _heuristics->process_references();
}

bool ShenandoahCollectorPolicy::unload_classes() {
  return _heuristics->unload_classes();
}

void ShenandoahCollectorPolicy::print_tracing_info(outputStream* out) {
  out->cr();
  out->print_cr("GC STATISTICS:");
  out->print_cr("  \"(G)\" (gross) pauses include time to safepoint. \"(N)\" (net) pauses are times spent in GC.");
  out->print_cr("  \"a\" is average time for each phase, look at levels to see if average makes sense.");
  out->print_cr("  \"lvls\" are quantiles: 0%% (minimum), 25%%, 50%% (median), 75%%, 100%% (maximum).");
  out->cr();

  for (uint i = 0; i < _num_phases; i++) {
    if (_timing_data[i]._secs.maximum() != 0) {
      print_summary_sd(out, _phase_names[i], &(_timing_data[i]._secs));
    }
  }

  out->cr();
  out->print_cr("" SIZE_FORMAT " allocation failure and " SIZE_FORMAT " user requested GCs", _allocation_failure_gcs, _user_requested_gcs);
  out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated concurrent markings", _successful_cm, _degenerated_cm);
  out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated update references  ", _successful_uprefs, _degenerated_uprefs);
  out->cr();
}

void ShenandoahCollectorPolicy::print_summary_sd(outputStream* out, const char* str, const HdrSeq* seq) {
  out->print_cr("%-27s = %8.2lf s (a = %8.0lf us) (n = " INT32_FORMAT_W(5) ") (lvls, us = %8.0lf, %8.0lf, %8.0lf, %8.0lf, %8.0lf)",
          str,
          seq->sum(),
          seq->avg() * 1000000.0,
          seq->num(),
          seq->percentile(0)  * 1000000.0,
          seq->percentile(25) * 1000000.0,
          seq->percentile(50) * 1000000.0,
          seq->percentile(75) * 1000000.0,
          seq->maximum() * 1000000.0
  );
}
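
// For reference, a line printed by print_summary_sd() might look like this
// (illustrative values only):
//   Final Mark Pauses (N)       =     1.23 s (a =     1230 us) (n =  1000) (lvls, us = ...)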

void ShenandoahCollectorPolicy::increase_cycle_counter() {
  _cycle_counter++;
}

size_t ShenandoahCollectorPolicy::cycle_counter() const {
  return _cycle_counter;
}

ShenandoahPhaseTimes* ShenandoahCollectorPolicy::phase_times() {
  return _phase_times;
}

uint ShenandoahCollectorPolicy::calc_workers_for_java_threads(uint application_workers) {
  return (uint)(ShenandoahGCWorkerPerJavaThread * application_workers);
}

uint ShenandoahCollectorPolicy::calc_workers_for_live_set(size_t live_data) {
  return (uint)(live_data / HeapSizePerGCThread);
}
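
// Illustrative numbers: with HeapSizePerGCThread at 64M and 256M of live data
// in the collection set, calc_workers_for_live_set() above yields 4 workers.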

uint ShenandoahCollectorPolicy::calc_default_active_workers(
                                                     uint total_workers,
                                                     uint min_workers,
                                                     uint active_workers,
                                                     uint application_workers,
                                                     uint workers_by_java_threads,
                                                     uint workers_by_liveset) {
  // If the user has turned off using a dynamic number of GC threads
  // or has requested a specific number, set the active number of
  // workers to all the workers.
  uint new_active_workers = total_workers;
  uint prev_active_workers = active_workers;
  uint active_workers_by_JT = 0;
  uint active_workers_by_liveset = 0;

  active_workers_by_JT = MAX2(workers_by_java_threads, min_workers);

  // Choose a number of GC threads based on the live set.
  active_workers_by_liveset =
      MAX2((uint) 2U, workers_by_liveset);

  uint max_active_workers =
    MAX2(active_workers_by_JT, active_workers_by_liveset);

  new_active_workers = MIN2(max_active_workers, new_active_workers);

  // Increase GC workers instantly but decrease them more
  // slowly.
  if (new_active_workers < prev_active_workers) {
    new_active_workers =
      MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
  }

  if (UseNUMA) {
    uint numa_groups = (uint)os::numa_get_groups_num();
    assert(numa_groups <= total_workers, "Not enough workers to cover all numa groups");
    new_active_workers = MAX2(new_active_workers, numa_groups);
  }

  // Check once more that the number of workers is within the limits.
  assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
  assert(new_active_workers >= min_workers, "Minimum workers not observed");
  assert(new_active_workers <= total_workers, "Total workers not observed");

  log_trace(gc, task)("ShenandoahCollectorPolicy::calc_default_active_workers() : "
     "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
     "prev_active_workers: " UINTX_FORMAT "\n"
     " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_liveset: " UINTX_FORMAT,
     (uintx)active_workers, (uintx)new_active_workers, (uintx)prev_active_workers,
     (uintx)active_workers_by_JT, (uintx)active_workers_by_liveset);
  assert(new_active_workers > 0, "Always need at least 1");
  return new_active_workers;
}
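
// Worked example (illustrative inputs): total_workers = 8, min_workers = 2,
// previous active_workers = 8, workers_by_java_threads = 3, workers_by_liveset = 0:
//   by_JT = MAX2(3, 2) = 3; by_liveset = MAX2(2, 0) = 2; max = 3;
//   new = MIN2(3, 8) = 3; since 3 < 8, average up: MAX2(2, (8 + 3) / 2) = 5.
// So 5 workers are chosen, absent any NUMA adjustment.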

/**
 * The initial marking phase also updates references of live objects from the
 * previous concurrent GC cycle, so we take both Java threads and the live set
 * into account.
 */
uint ShenandoahCollectorPolicy::calc_workers_for_init_marking(uint active_workers,
                                            uint application_workers) {
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    assert(ParallelGCThreads > 0, "Always need at least 1");
    return ParallelGCThreads;
  } else {
    ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)ShenandoahHeap::heap()->collector_policy();
    size_t live_data = policy->_heuristics->bytes_in_cset();

    return calc_default_active_workers(ParallelGCThreads, (ParallelGCThreads > 1) ? 2 : 1,
      active_workers, application_workers,
      calc_workers_for_java_threads(application_workers),
      calc_workers_for_live_set(live_data));
  }
}

uint ShenandoahCollectorPolicy::calc_workers_for_conc_marking(uint active_workers,
                                            uint application_workers) {
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    assert(ConcGCThreads > 0, "Always need at least 1");
    return ConcGCThreads;
  } else {
    return calc_default_active_workers(ConcGCThreads,
      (ConcGCThreads > 1 ? 2 : 1), active_workers,
      application_workers, calc_workers_for_java_threads(application_workers), 0);
  }
}

uint ShenandoahCollectorPolicy::calc_workers_for_final_marking(uint active_workers,
                                            uint application_workers) {
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    assert(ParallelGCThreads > 0, "Always need at least 1");
    return ParallelGCThreads;
  } else {
    return calc_default_active_workers(ParallelGCThreads,
      (ParallelGCThreads > 1 ? 2 : 1), active_workers,
      application_workers, calc_workers_for_java_threads(application_workers), 0);
  }
}

// Calculate workers for concurrent evacuation (concurrent GC).
uint ShenandoahCollectorPolicy::calc_workers_for_conc_evacuation(uint active_workers,
                                            uint application_workers) {
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    assert(ConcGCThreads > 0, "Always need at least 1");
    return ConcGCThreads;
  } else {
    return calc_workers_for_evacuation(false, // not a full GC
      ConcGCThreads, active_workers, application_workers);
  }
}

// Calculate workers for parallel evacuation (full GC).
uint ShenandoahCollectorPolicy::calc_workers_for_parallel_evacuation(uint active_workers,
                                            uint application_workers) {
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    assert(ParallelGCThreads > 0, "Always need at least 1");
    return ParallelGCThreads;
  } else {
    return calc_workers_for_evacuation(true, // a full GC
      ParallelGCThreads, active_workers, application_workers);
  }
}

uint ShenandoahCollectorPolicy::calc_workers_for_evacuation(bool full_gc,
                                            uint total_workers,
                                            uint active_workers,
                                            uint application_workers) {
  // Calculation based on live set.
  size_t live_data = 0;
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (full_gc) {
    ShenandoahHeapRegionSet* regions = heap->regions();
    for (size_t index = 0; index < regions->active_regions(); index++) {
      live_data += regions->get(index)->get_live_data_bytes();
    }
  } else {
    ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)heap->collector_policy();
    live_data = policy->_heuristics->bytes_in_cset();
  }

  uint active_workers_by_liveset = calc_workers_for_live_set(live_data);
  return calc_default_active_workers(total_workers,
      (total_workers > 1 ? 2 : 1), active_workers,
      application_workers, 0, active_workers_by_liveset);
}

bool ShenandoahCollectorPolicy::should_start_partial_gc() {
  return _heuristics->should_start_partial_gc();
}