/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "gc/shenandoah/shenandoahPhaseTimes.hpp"
#include "runtime/os.hpp"

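// ShenandoahHeuristics is the common base class for the concrete GC heuristics
// below. It tracks allocation and reclamation history, counts cancelled and
// successful concurrent mark and update-refs cycles, and drives the two central
// policy decisions: when to start concurrent marking, and which regions go into
// the collection set.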
class ShenandoahHeuristics : public CHeapObj<mtGC> {

  NumberSeq _allocation_rate_bytes;
  NumberSeq _reclamation_rate_bytes;

  size_t _bytes_allocated_since_CM;
  size_t _bytes_reclaimed_this_cycle;

protected:
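  // Per-region scratch record used while choosing the collection set:
  // a region index paired with the amount of garbage in that region.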
  typedef struct {
    size_t region_number;
    size_t garbage;
  } RegionGarbage;

  static int compare_by_garbage(RegionGarbage a, RegionGarbage b) {
    if (a.garbage > b.garbage)
      return -1;
    else if (a.garbage < b.garbage)
      return 1;
    else return 0;
  }

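  // Returns the lazily allocated RegionGarbage scratch array, growing it
  // when the heap has more active regions than the cached capacity.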
  RegionGarbage* get_region_garbage_cache(size_t num) {
    RegionGarbage* res = _region_garbage;
    if (res == NULL) {
      res = NEW_C_HEAP_ARRAY(RegionGarbage, num, mtGC);
      _region_garbage = res;
      _region_garbage_size = num;
    } else if (_region_garbage_size < num) {
      res = REALLOC_C_HEAP_ARRAY(RegionGarbage, _region_garbage, num, mtGC);
      _region_garbage = res;
      _region_garbage_size = num;
    }
    return res;
  }

  RegionGarbage* _region_garbage;
  size_t _region_garbage_size;

  size_t _bytes_allocated_start_CM;
  size_t _bytes_allocated_during_CM;

  size_t _bytes_allocated_after_last_gc;

  uint _cancelled_cm_cycles_in_a_row;
  uint _successful_cm_cycles_in_a_row;

  uint _cancelled_uprefs_cycles_in_a_row;
  uint _successful_uprefs_cycles_in_a_row;

  size_t _bytes_in_cset;

public:

  ShenandoahHeuristics();
  ~ShenandoahHeuristics();

  void record_bytes_allocated(size_t bytes);
  void record_bytes_reclaimed(size_t bytes);
  void record_bytes_start_CM(size_t bytes);
  void record_bytes_end_CM(size_t bytes);

  void record_gc_start() {
    // Do nothing.
  }

  void record_gc_end() {
    _bytes_allocated_after_last_gc = ShenandoahHeap::heap()->used();
  }

  size_t bytes_in_cset() const { return _bytes_in_cset; }

  virtual void print_thresholds() {
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const = 0;

  virtual bool update_refs_early() const {
    return ShenandoahUpdateRefsEarly;
  }

  virtual bool should_start_partial_gc() {
    return false;
  }

  virtual bool handover_cancelled_marking() {
    return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }

  virtual bool handover_cancelled_uprefs() {
    return _cancelled_uprefs_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }

  virtual void record_cm_cancelled() {
    _cancelled_cm_cycles_in_a_row++;
    _successful_cm_cycles_in_a_row = 0;
  }

  virtual void record_cm_success() {
    _cancelled_cm_cycles_in_a_row = 0;
    _successful_cm_cycles_in_a_row++;
  }

  virtual void record_uprefs_cancelled() {
    _cancelled_uprefs_cycles_in_a_row++;
    _successful_uprefs_cycles_in_a_row = 0;
  }

  virtual void record_uprefs_success() {
    _cancelled_uprefs_cycles_in_a_row = 0;
    _successful_uprefs_cycles_in_a_row++;
  }

  virtual void record_full_gc() {
    _bytes_in_cset = 0;
  }

  virtual void start_choose_collection_set() {
  }
  virtual void end_choose_collection_set() {
  }
  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) = 0;

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections=NULL);
  virtual void choose_free_set(ShenandoahFreeSet* free_set);

  virtual bool process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Process references every Nth GC cycle.
    return cycle % ShenandoahRefProcFrequency == 0;
  }

  virtual bool unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Unload classes every Nth GC cycle.
    // This should not happen in the same cycle as process_references to amortize costs.
    // Offsetting by one is enough to break the rendezvous when periods are equal.
    // When periods are not equal, offsetting by one is just as good as any other guess.
    return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
  }

  virtual bool needs_regions_sorted_by_garbage() {
    // Most of them do not.
    return false;
  }
};

ShenandoahHeuristics::ShenandoahHeuristics() :
  _bytes_allocated_since_CM(0),
  _bytes_reclaimed_this_cycle(0),
  _region_garbage(NULL),
  _region_garbage_size(0),
  _bytes_allocated_start_CM(0),
  _bytes_allocated_during_CM(0),
  _bytes_allocated_after_last_gc(0),
  _cancelled_cm_cycles_in_a_row(0),
  _successful_cm_cycles_in_a_row(0),
  _cancelled_uprefs_cycles_in_a_row(0),
  _successful_uprefs_cycles_in_a_row(0),
  _bytes_in_cset(0)
{
}

ShenandoahHeuristics::~ShenandoahHeuristics() {
  if (_region_garbage != NULL) {
    FREE_C_HEAP_ARRAY(RegionGarbage, _region_garbage);
  }
}

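// Default collection set selection: regions with no live data are recycled
// immediately; the remaining candidates are accepted or rejected by the
// concrete heuristics via region_in_collection_set().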
void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
  start_choose_collection_set();

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Poll this before populating collection set.
  size_t total_garbage = heap->garbage();

  // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.

  ShenandoahHeapRegionSet* regions = heap->regions();
  size_t active = regions->active_regions();

  RegionGarbage* candidates = get_region_garbage_cache(active);

  size_t cand_idx = 0;
  _bytes_in_cset = 0;

  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;
  for (size_t i = 0; i < active; i++) {
    ShenandoahHeapRegion* region = regions->get(i);

    if (! region->is_humongous() && ! region->is_pinned()) {
      if ((! region->is_empty()) && ! region->has_live()) {
        // We can recycle it right away and put it in the free set.
        immediate_regions++;
        immediate_garbage += region->garbage();
        heap->decrease_used(region->used());
        region->recycle();
        log_develop_trace(gc)("Choose region " SIZE_FORMAT " for immediate reclaim with garbage = " SIZE_FORMAT
                              " and live = " SIZE_FORMAT "\n",
                              region->region_number(), region->garbage(), region->get_live_data_bytes());
      } else {
        // This is our candidate for later consideration.
        candidates[cand_idx].region_number = region->region_number();
        candidates[cand_idx].garbage = region->garbage();
        cand_idx++;
      }
    } else {
      assert(region->has_live() || region->is_empty() || region->is_pinned() || region->is_humongous(), "check rejected");
      log_develop_trace(gc)("Rejected region " SIZE_FORMAT " with garbage = " SIZE_FORMAT
                            " and live = " SIZE_FORMAT "\n",
                            region->region_number(), region->garbage(), region->get_live_data_bytes());
    }
  }

  // Step 2. Process the remaining candidates, if any.

  if (cand_idx > 0) {
    if (needs_regions_sorted_by_garbage()) {
      QuickSort::sort<RegionGarbage>(candidates, (int)cand_idx, compare_by_garbage, false);
    }

    for (size_t i = 0; i < cand_idx; i++) {
      ShenandoahHeapRegion* region = regions->get_fast(candidates[i].region_number);
      if (region_in_collection_set(region, immediate_garbage)) {
        log_develop_trace(gc)("Choose region " SIZE_FORMAT " with garbage = " SIZE_FORMAT
                              " and live = " SIZE_FORMAT "\n",
                              region->region_number(), region->garbage(), region->get_live_data_bytes());
        collection_set->add_region(region);
        region->set_in_collection_set(true);
        _bytes_in_cset += region->used();
      }
    }
  }

  end_choose_collection_set();

  log_info(gc, ergo)("Total Garbage: " SIZE_FORMAT "M",
                     total_garbage / M);
  log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT " regions",
                     immediate_garbage / M, immediate_regions);
  log_info(gc, ergo)("Garbage to be collected: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions",
                     collection_set->garbage() / M, collection_set->garbage() * 100 / MAX2(total_garbage, (size_t)1), collection_set->count());
  log_info(gc, ergo)("Live objects to be evacuated: " SIZE_FORMAT "M",
                     collection_set->live_data() / M);
  log_info(gc, ergo)("Live/garbage ratio in collected regions: " SIZE_FORMAT "%%",
                     collection_set->live_data() * 100 / MAX2(collection_set->garbage(), (size_t)1));
}

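// Default free set selection: every region that is not humongous, pinned, or
// already in the collection set becomes part of the free set.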
void ShenandoahHeuristics::choose_free_set(ShenandoahFreeSet* free_set) {

  ShenandoahHeapRegionSet* ordered_regions = ShenandoahHeap::heap()->regions();
  size_t i = 0;
  size_t end = ordered_regions->active_regions();

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  while (i < end) {
    ShenandoahHeapRegion* region = ordered_regions->get(i++);
    if ((! heap->in_collection_set(region))
        && (! region->is_humongous())
        && (! region->is_pinned())) {
      free_set->add_region(region);
    }
  }
}

void ShenandoahCollectorPolicy::record_workers_start(TimingPhase phase) {
  for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
    _phase_times->reset(i);
  }
}

void ShenandoahCollectorPolicy::record_workers_end(TimingPhase phase) {
  guarantee(phase == init_evac ||
            phase == scan_roots ||
            phase == update_roots ||
            phase == partial_gc_work ||
            phase == final_update_refs_roots ||
            phase == _num_phases,
            "only in these phases can we add per-thread phase times");
  if (phase != _num_phases) {
    // Merge _phase_times into the counters below the given phase.
    for (uint i = 0; i < ShenandoahPhaseTimes::GCParPhasesSentinel; i++) {
      double t = _phase_times->average(i);
      _timing_data[phase + i + 1]._secs.add(t);
    }
  }
}

void ShenandoahCollectorPolicy::record_phase_start(TimingPhase phase) {
  _timing_data[phase]._start = os::elapsedTime();
}

void ShenandoahCollectorPolicy::record_phase_end(TimingPhase phase) {
  double end = os::elapsedTime();
  double elapsed = end - _timing_data[phase]._start;
  _timing_data[phase]._secs.add(elapsed);
}

void ShenandoahCollectorPolicy::record_gc_start() {
  _heuristics->record_gc_start();
}

void ShenandoahCollectorPolicy::record_gc_end() {
  _heuristics->record_gc_end();
}

void ShenandoahCollectorPolicy::report_concgc_cancelled() {
}

void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
  _bytes_allocated_since_CM = bytes;
  _bytes_allocated_start_CM = bytes;
  _allocation_rate_bytes.add(bytes);
}

void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
  _bytes_reclaimed_this_cycle = bytes;
  _reclamation_rate_bytes.add(bytes);
}

void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
  _bytes_allocated_start_CM = bytes;
}

void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
  _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)
                                                                   : bytes;
}

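// PassiveHeuristics: never starts concurrent marking on its own, considers any
// region with garbage for the collection set, and processes references /
// unloads classes randomly with a 50% chance per cycle.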
class PassiveHeuristics : public ShenandoahHeuristics {
public:
  PassiveHeuristics() : ShenandoahHeuristics() {
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    return r->garbage() > 0;
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    // Never do concurrent GCs.
    return false;
  }

  virtual bool process_references() {
    // Randomly process refs with 50% chance.
    return (os::random() & 1) == 1;
  }

  virtual bool unload_classes() {
    // Randomly unload classes with 50% chance.
    return (os::random() & 1) == 1;
  }
};

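// AggressiveHeuristics: starts concurrent marking at every opportunity and
// considers any region with garbage for the collection set.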
class AggressiveHeuristics : public ShenandoahHeuristics {
public:
  AggressiveHeuristics() : ShenandoahHeuristics() {
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    return r->garbage() > 0;
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    return true;
  }

  virtual bool process_references() {
    // Randomly process refs with 50% chance.
    return (os::random() & 1) == 1;
  }

  virtual bool unload_classes() {
    // Randomly unload classes with 50% chance.
    return (os::random() & 1) == 1;
  }
};

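// DynamicHeuristics: starts concurrent marking once available memory drops
// below a threshold derived from ShenandoahFreeThreshold and
// ShenandoahCSetThreshold, provided enough allocation has happened since the
// last cycle. Regions enter the collection set when their garbage exceeds
// ShenandoahGarbageThreshold percent of the region size.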
class DynamicHeuristics : public ShenandoahHeuristics {
public:
  DynamicHeuristics() : ShenandoahHeuristics() {
  }

  void print_thresholds() {
    log_info(gc, init)("Shenandoah heuristics thresholds: allocation " SIZE_FORMAT ", free " SIZE_FORMAT ", garbage " SIZE_FORMAT,
                       ShenandoahAllocationThreshold,
                       ShenandoahFreeThreshold,
                       ShenandoahGarbageThreshold);
  }

  virtual ~DynamicHeuristics() {}

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {

    bool shouldStartConcurrentMark = false;

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t free_capacity = heap->free_regions()->capacity();
    size_t free_used = heap->free_regions()->used();
    assert(free_used <= free_capacity, "must not use more than capacity");
    size_t available = free_capacity - free_used;

    if (!update_refs_early()) {
      // Count in the memory available after cset reclamation.
      size_t cset = MIN2(_bytes_in_cset, (ShenandoahCSetThreshold * capacity) / 100);
      available += cset;
    }

    uintx threshold = ShenandoahFreeThreshold + ShenandoahCSetThreshold;
    size_t targetStartMarking = (capacity * threshold) / 100;

    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
    if (available < targetStartMarking &&
        heap->bytes_allocated_since_cm() > threshold_bytes_allocated)
    {
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      shouldStartConcurrentMark = true;
    }

    return shouldStartConcurrentMark;
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
    return r->garbage() > threshold;
  }

};

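// AdaptiveHeuristics: like DynamicHeuristics, but adjusts the free threshold
// over time: it is lowered after a run of successful cycles and raised after a
// cancelled cycle or a full GC. A short history of collection set sizes is kept
// to predict how much memory cset reclamation will return.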
class AdaptiveHeuristics : public ShenandoahHeuristics {
private:
  uintx _free_threshold;
  TruncatedSeq* _cset_history;

public:
  AdaptiveHeuristics() :
    ShenandoahHeuristics(),
    _free_threshold(ShenandoahInitFreeThreshold),
    _cset_history(new TruncatedSeq((uint)ShenandoahHappyCyclesThreshold)) {

    _cset_history->add((double) ShenandoahCSetThreshold);
    _cset_history->add((double) ShenandoahCSetThreshold);
  }

  virtual ~AdaptiveHeuristics() {
    delete _cset_history;
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
    return r->garbage() > threshold;
  }

  void optimize_free_threshold(uint successful_cycles) {
    if (successful_cycles > ShenandoahHappyCyclesThreshold &&
        _free_threshold > ShenandoahMinFreeThreshold) {
      _free_threshold--;
      log_info(gc, ergo)("Reducing free threshold to: " UINTX_FORMAT, _free_threshold);
      _successful_cm_cycles_in_a_row = 0;
    }
  }

  void pessimize_free_threshold() {
    if (_free_threshold < ShenandoahMaxFreeThreshold) {
      _free_threshold++;
      log_info(gc, ergo)("Increasing free threshold to: " UINTX_FORMAT, _free_threshold);
    }
  }

  virtual void record_cm_cancelled() {
    ShenandoahHeuristics::record_cm_cancelled();
    pessimize_free_threshold();
  }

  virtual void record_cm_success() {
    ShenandoahHeuristics::record_cm_success();
    if (update_refs_early()) {
      optimize_free_threshold(_successful_cm_cycles_in_a_row);
    }
  }

  virtual void record_uprefs_cancelled() {
    ShenandoahHeuristics::record_uprefs_cancelled();
    pessimize_free_threshold();
  }

  virtual void record_uprefs_success() {
    ShenandoahHeuristics::record_uprefs_success();
    optimize_free_threshold(_successful_uprefs_cycles_in_a_row);
  }

  virtual void record_full_gc() {
    ShenandoahHeuristics::record_full_gc();
    pessimize_free_threshold();
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    bool shouldStartConcurrentMark = false;

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t free_capacity = heap->free_regions()->capacity();
    size_t free_used = heap->free_regions()->used();
    assert(free_used <= free_capacity, "must not use more than capacity");
    size_t available = free_capacity - free_used;
    uintx factor = _free_threshold;
    size_t cset_threshold = 0;
    if (!update_refs_early()) {
      // Count in the memory available after cset reclamation.
      cset_threshold = (size_t) _cset_history->davg();
      size_t cset = MIN2(_bytes_in_cset, (cset_threshold * capacity) / 100);
      available += cset;
      factor += cset_threshold;
    }

    size_t targetStartMarking = (capacity * factor) / 100;

    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
    if (available < targetStartMarking &&
        heap->bytes_allocated_since_cm() > threshold_bytes_allocated)
    {
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      shouldStartConcurrentMark = true;
    }

    if (shouldStartConcurrentMark) {
      if (! update_refs_early()) {
        log_info(gc, ergo)("Predicted cset threshold: " SIZE_FORMAT, cset_threshold);
        log_info(gc, ergo)("Starting concurrent mark at " SIZE_FORMAT "K CSet (" SIZE_FORMAT "%%)", _bytes_in_cset / K, _bytes_in_cset * 100 / capacity);
        _cset_history->add((double) (_bytes_in_cset * 100 / capacity));
      }
    }
    return shouldStartConcurrentMark;
  }

};

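// GlobalHeuristics: greedily accepts the most garbage-rich regions (candidates
// arrive sorted by garbage) until the collection set covers
// ShenandoahGarbageThreshold percent of the total heap garbage.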
class GlobalHeuristics : public DynamicHeuristics {
private:
  size_t _garbage;
  size_t _min_garbage;
public:
  GlobalHeuristics() : DynamicHeuristics() {
    if (FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) {
      FLAG_SET_DEFAULT(ShenandoahGarbageThreshold, 90);
    }
  }
  virtual ~GlobalHeuristics() {}

  virtual void start_choose_collection_set() {
    _garbage = 0;
    size_t heap_garbage = ShenandoahHeap::heap()->garbage();
    _min_garbage = heap_garbage * ShenandoahGarbageThreshold / 100;
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    if (_garbage + immediate_garbage < _min_garbage && ! r->is_empty()) {
      _garbage += r->garbage();
      return true;
    } else {
      return false;
    }
  }

  virtual bool needs_regions_sorted_by_garbage() {
    return true;
  }
};

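// RatioHeuristics: accepts garbage-rich regions while the live/garbage ratio
// of the accumulated collection set stays below 100 - ShenandoahGarbageThreshold.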
class RatioHeuristics : public DynamicHeuristics {
private:
  size_t _garbage;
  size_t _live;
public:
  RatioHeuristics() : DynamicHeuristics() {
    if (FLAG_IS_DEFAULT(ShenandoahGarbageThreshold)) {
      FLAG_SET_DEFAULT(ShenandoahGarbageThreshold, 95);
    }
  }
  virtual ~RatioHeuristics() {}

  virtual void start_choose_collection_set() {
    _garbage = 0;
    _live = 0;
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    size_t min_ratio = 100 - ShenandoahGarbageThreshold;
    if (_live * 100 / MAX2(_garbage + immediate_garbage, (size_t)1) < min_ratio && ! r->is_empty()) {
      _garbage += r->garbage();
      _live += r->get_live_data_bytes();
      return true;
    } else {
      return false;
    }
  }

  virtual bool needs_regions_sorted_by_garbage() {
    return true;
  }
};

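// ConnectionHeuristics: heuristics driven by the inter-region connections
// matrix passed into choose_collection_set(). It seeds the collection set with
// the most garbage-rich region, then pulls in regions connected to it in
// either direction.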
class ConnectionHeuristics : public ShenandoahHeuristics {
private:
  size_t _max_live_data;
  double _used_threshold_factor;
  double _garbage_threshold_factor;
  double _allocation_threshold_factor;

  uintx _used_threshold;
  uintx _garbage_threshold;
  uintx _allocation_threshold;

public:
  ConnectionHeuristics() : ShenandoahHeuristics() {
    _max_live_data = 0;

    _used_threshold = 0;
    _garbage_threshold = 0;
    _allocation_threshold = 0;

    _used_threshold_factor = 0.;
    _garbage_threshold_factor = 0.1;
    _allocation_threshold_factor = 0.;
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    size_t threshold_bytes_allocated = 64 * 1024 * 1024; // 64M
    size_t bytes_alloc = ShenandoahHeap::heap()->bytes_allocated_since_cm();
    bool result = bytes_alloc > threshold_bytes_allocated;
    if (result) tty->print("Starting a concurrent mark");
    return result;
  }

  bool maybe_add_heap_region(ShenandoahHeapRegion* hr, ShenandoahCollectionSet* collection_set) {
    if (!hr->is_humongous() && hr->has_live() && !collection_set->contains(hr)) {
      collection_set->add_region_check_for_duplicates(hr);
      hr->set_in_collection_set(true);
      return true;
    }
    return false;
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
    ShenandoahHeapRegionSet* regions = ShenandoahHeap::heap()->regions();
    size_t active = regions->active_regions();

    RegionGarbage* sorted_by_garbage = get_region_garbage_cache(active);
    for (size_t i = 0; i < active; i++) {
      ShenandoahHeapRegion* r = regions->get_fast(i);
      sorted_by_garbage[i].region_number = r->region_number();
      sorted_by_garbage[i].garbage = r->garbage();
    }

    QuickSort::sort<RegionGarbage>(sorted_by_garbage, (int) active, compare_by_garbage, false);

    size_t num = ShenandoahHeap::heap()->num_regions();
    // Simulate write heuristics by picking the best region.
    int r = 0;
    ShenandoahHeapRegion* chosen_one = regions->get(sorted_by_garbage[0].region_number);

    while (! maybe_add_heap_region(chosen_one, collection_set)) {
      chosen_one = regions->get(sorted_by_garbage[++r].region_number);
    }

    size_t region_number = chosen_one->region_number();
    log_develop_trace(gc)("Adding chosen region " SIZE_FORMAT, region_number);

    // Add all the regions which point to this region.
    for (size_t i = 0; i < num; i++) {
      if (connections[i * num + region_number] > 0) {
        ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
        if (maybe_add_heap_region(candidate, collection_set)) {
          log_develop_trace(gc)("Adding region " SIZE_FORMAT " which points to the chosen region", i);
        }
      }
    }

    // Add all the regions they point to.
    for (size_t ci = 0; ci < collection_set->active_regions(); ci++) {
      ShenandoahHeapRegion* cs_heap_region = collection_set->get(ci);
      size_t cs_heap_region_number = cs_heap_region->region_number();
      for (size_t i = 0; i < num; i++) {
        if (connections[i * num + cs_heap_region_number] > 0) {
          ShenandoahHeapRegion* candidate = regions->get(sorted_by_garbage[i].region_number);
          if (maybe_add_heap_region(candidate, collection_set)) {
            log_develop_trace(gc)
              ("Adding region " SIZE_FORMAT " which is pointed to by region " SIZE_FORMAT, i, cs_heap_region_number);
          }
        }
      }
    }
    _max_live_data = MAX2(_max_live_data, collection_set->live_data());
    collection_set->print();
  }

  virtual bool region_in_collection_set(ShenandoahHeapRegion* r, size_t immediate_garbage) {
    assert(false, "Shouldn't get here");
    return false;
  }
};
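
// PartialHeuristics: extends AdaptiveHeuristics for the matrix-based partial
// GC mode; a partial GC starts once allocations since the last GC exceed
// ShenandoahAllocationThreshold percent of heap capacity.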
class PartialHeuristics : public AdaptiveHeuristics {
public:
  PartialHeuristics() : AdaptiveHeuristics() {
    if (FLAG_IS_DEFAULT(ShenandoahAllocationThreshold)) {
      FLAG_SET_DEFAULT(ShenandoahAllocationThreshold, 5);
    }
    FLAG_SET_DEFAULT(UseShenandoahMatrix, true);
    // TODO: Disable this optimization for now, as it also requires the matrix barriers.
    FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
  }

  virtual ~PartialHeuristics() {}

  bool update_refs_early() const {
    return true;
  }

  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t capacity = heap->capacity();

    size_t used = heap->used();
    return (used - _bytes_allocated_after_last_gc) * 100 / capacity > ShenandoahAllocationThreshold;
  }
};

ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
  _cycle_counter(0),
  _successful_cm(0),
  _degenerated_cm(0),
  _successful_uprefs(0),
  _degenerated_uprefs(0)
{

  ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());

  initialize_all();

  _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
  _stw_timer = new (ResourceObj::C_HEAP, mtGC) STWGCTimer();
  _conc_timer = new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer();
  _user_requested_gcs = 0;
  _allocation_failure_gcs = 0;
  _conc_gc_aborted = false;

  _phase_names[total_pause]                     = "Total Pauses (N)";
  _phase_names[total_pause_gross]               = "Total Pauses (G)";
  _phase_names[init_mark]                       = "Initial Mark Pauses (N)";
  _phase_names[init_mark_gross]                 = "Initial Mark Pauses (G)";
  _phase_names[final_mark]                      = "Final Mark Pauses (N)";
  _phase_names[final_mark_gross]                = "Final Mark Pauses (G)";
  _phase_names[accumulate_stats]                = "  Accumulate Stats";
  _phase_names[make_parsable]                   = "  Make Parsable";
  _phase_names[clear_liveness]                  = "  Clear Liveness";
  _phase_names[finish_queues]                   = "  Finish Queues";
  _phase_names[weakrefs]                        = "  Weak References";
  _phase_names[class_unloading]                 = "  Class Unloading";
  _phase_names[prepare_evac]                    = "  Prepare Evacuation";

  _phase_names[scan_roots]                      = "  Scan Roots";
  _phase_names[scan_thread_roots]               = "    S: Thread Roots";
  _phase_names[scan_code_roots]                 = "    S: Code Cache Roots";
  _phase_names[scan_string_table_roots]         = "    S: String Table Roots";
  _phase_names[scan_universe_roots]             = "    S: Universe Roots";
  _phase_names[scan_jni_roots]                  = "    S: JNI Roots";
  _phase_names[scan_jni_weak_roots]             = "    S: JNI Weak Roots";
  _phase_names[scan_synchronizer_roots]         = "    S: Synchronizer Roots";
  _phase_names[scan_flat_profiler_roots]        = "    S: Flat Profiler Roots";
  _phase_names[scan_management_roots]           = "    S: Management Roots";
  _phase_names[scan_system_dictionary_roots]    = "    S: System Dict Roots";
  _phase_names[scan_cldg_roots]                 = "    S: CLDG Roots";
  _phase_names[scan_jvmti_roots]                = "    S: JVMTI Roots";

  _phase_names[update_roots]                    = "  Update Roots";
  _phase_names[update_thread_roots]             = "    U: Thread Roots";
  _phase_names[update_code_roots]               = "    U: Code Cache Roots";
  _phase_names[update_string_table_roots]       = "    U: String Table Roots";
  _phase_names[update_universe_roots]           = "    U: Universe Roots";
  _phase_names[update_jni_roots]                = "    U: JNI Roots";
  _phase_names[update_jni_weak_roots]           = "    U: JNI Weak Roots";
  _phase_names[update_synchronizer_roots]       = "    U: Synchronizer Roots";
  _phase_names[update_flat_profiler_roots]      = "    U: Flat Profiler Roots";
  _phase_names[update_management_roots]         = "    U: Management Roots";
  _phase_names[update_system_dictionary_roots]  = "    U: System Dict Roots";
  _phase_names[update_cldg_roots]               = "    U: CLDG Roots";
  _phase_names[update_jvmti_roots]              = "    U: JVMTI Roots";

  _phase_names[init_evac]                       = "  Initial Evacuation";
  _phase_names[evac_thread_roots]               = "    E: Thread Roots";
  _phase_names[evac_code_roots]                 = "    E: Code Cache Roots";
  _phase_names[evac_string_table_roots]         = "    E: String Table Roots";
  _phase_names[evac_universe_roots]             = "    E: Universe Roots";
  _phase_names[evac_jni_roots]                  = "    E: JNI Roots";
  _phase_names[evac_jni_weak_roots]             = "    E: JNI Weak Roots";
  _phase_names[evac_synchronizer_roots]         = "    E: Synchronizer Roots";
  _phase_names[evac_flat_profiler_roots]        = "    E: Flat Profiler Roots";
  _phase_names[evac_management_roots]           = "    E: Management Roots";
  _phase_names[evac_system_dictionary_roots]    = "    E: System Dict Roots";
  _phase_names[evac_cldg_roots]                 = "    E: CLDG Roots";
  _phase_names[evac_jvmti_roots]                = "    E: JVMTI Roots";

  _phase_names[recycle_regions]                 = "  Recycle regions";
  _phase_names[reset_bitmaps]                   = "Reset Bitmaps";
  _phase_names[resize_tlabs]                    = "Resize TLABs";

  _phase_names[full_gc]                         = "Full GC";
  _phase_names[full_gc_heapdumps]               = "  Heap Dumps";
  _phase_names[full_gc_prepare]                 = "  Prepare";
  _phase_names[full_gc_mark]                    = "  Mark";
  _phase_names[full_gc_mark_finish_queues]      = "    Finish Queues";
  _phase_names[full_gc_mark_weakrefs]           = "    Weak References";
  _phase_names[full_gc_mark_class_unloading]    = "    Class Unloading";
  _phase_names[full_gc_calculate_addresses]     = "  Calculate Addresses";
  _phase_names[full_gc_adjust_pointers]         = "  Adjust Pointers";
  _phase_names[full_gc_copy_objects]            = "  Copy Objects";

  _phase_names[partial_gc_gross]                = "Pause Partial GC (G)";
  _phase_names[partial_gc]                      = "Pause Partial GC (N)";
  _phase_names[partial_gc_prepare]              = "  Prepare";
  _phase_names[partial_gc_work]                 = "  Work";
  _phase_names[partial_gc_thread_roots]         = "    P: Thread Roots";
  _phase_names[partial_gc_code_roots]           = "    P: Code Cache Roots";
  _phase_names[partial_gc_string_table_roots]   = "    P: String Table Roots";
  _phase_names[partial_gc_universe_roots]       = "    P: Universe Roots";
  _phase_names[partial_gc_jni_roots]            = "    P: JNI Roots";
  _phase_names[partial_gc_jni_weak_roots]       = "    P: JNI Weak Roots";
  _phase_names[partial_gc_synchronizer_roots]   = "    P: Synchronizer Roots";
  _phase_names[partial_gc_flat_profiler_roots]  = "    P: Flat Profiler Roots";
  _phase_names[partial_gc_management_roots]     = "    P: Management Roots";
  _phase_names[partial_gc_system_dict_roots]    = "    P: System Dict Roots";
  _phase_names[partial_gc_cldg_roots]           = "    P: CLDG Roots";
  _phase_names[partial_gc_jvmti_roots]          = "    P: JVMTI Roots";
  _phase_names[partial_gc_recycle]              = "  Recycle";

  _phase_names[conc_mark]                       = "Concurrent Marking";
  _phase_names[conc_evac]                       = "Concurrent Evacuation";

  _phase_names[init_update_refs_gross]          = "Pause Init  Update Refs (G)";
  _phase_names[init_update_refs]                = "Pause Init  Update Refs (N)";
  _phase_names[conc_update_refs]                = "Concurrent Update Refs";
  _phase_names[final_update_refs_gross]         = "Pause Final Update Refs (G)";
  _phase_names[final_update_refs]               = "Pause Final Update Refs (N)";

  _phase_names[final_update_refs_roots]                = "  Update Roots";
  _phase_names[final_update_refs_thread_roots]         = "    UR: Thread Roots";
  _phase_names[final_update_refs_code_roots]           = "    UR: Code Cache Roots";
  _phase_names[final_update_refs_string_table_roots]   = "    UR: String Table Roots";
  _phase_names[final_update_refs_universe_roots]       = "    UR: Universe Roots";
  _phase_names[final_update_refs_jni_roots]            = "    UR: JNI Roots";
  _phase_names[final_update_refs_jni_weak_roots]       = "    UR: JNI Weak Roots";
  _phase_names[final_update_refs_synchronizer_roots]   = "    UR: Synchronizer Roots";
  _phase_names[final_update_refs_flat_profiler_roots]  = "    UR: Flat Profiler Roots";
  _phase_names[final_update_refs_management_roots]     = "    UR: Management Roots";
  _phase_names[final_update_refs_system_dict_roots]    = "    UR: System Dict Roots";
  _phase_names[final_update_refs_cldg_roots]           = "    UR: CLDG Roots";
  _phase_names[final_update_refs_jvmti_roots]          = "    UR: JVMTI Roots";

  if (ShenandoahGCHeuristics != NULL) {
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      log_info(gc, init)("Shenandoah heuristics: aggressive");
      _heuristics = new AggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
      log_info(gc, init)("Shenandoah heuristics: dynamic");
      _heuristics = new DynamicHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "global") == 0) {
      log_info(gc, init)("Shenandoah heuristics: global");
      _heuristics = new GlobalHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "ratio") == 0) {
      log_info(gc, init)("Shenandoah heuristics: ratio");
      _heuristics = new RatioHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      log_info(gc, init)("Shenandoah heuristics: adaptive");
      _heuristics = new AdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      log_info(gc, init)("Shenandoah heuristics: passive");
      _heuristics = new PassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "connections") == 0) {
      log_info(gc, init)("Shenandoah heuristics: connections");
      _heuristics = new ConnectionHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "partial") == 0) {
      log_info(gc, init)("Shenandoah heuristics: partial GC");
      _heuristics = new PartialHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }
    _heuristics->print_thresholds();
  } else {
    ShouldNotReachHere();
  }
  _phase_times = new ShenandoahPhaseTimes(MAX2(ConcGCThreads, ParallelGCThreads));
}

ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
  return this;
}

BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
  return BarrierSet::ShenandoahBarrierSet;
}

HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
                                                       bool is_tlab,
                                                       bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

void ShenandoahCollectorPolicy::initialize_alignments() {

  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
  _space_alignment = ShenandoahHeapRegion::region_size_bytes();
  _heap_alignment = ShenandoahHeapRegion::region_size_bytes();
}

void ShenandoahCollectorPolicy::post_heap_initialize() {
  // Nothing to do here (yet).
}

void ShenandoahCollectorPolicy::record_bytes_allocated(size_t bytes) {
  _heuristics->record_bytes_allocated(bytes);
}

void ShenandoahCollectorPolicy::record_bytes_start_CM(size_t bytes) {
  _heuristics->record_bytes_start_CM(bytes);
}

void ShenandoahCollectorPolicy::record_bytes_end_CM(size_t bytes) {
  _heuristics->record_bytes_end_CM(bytes);
}

void ShenandoahCollectorPolicy::record_bytes_reclaimed(size_t bytes) {
  _heuristics->record_bytes_reclaimed(bytes);
}

void ShenandoahCollectorPolicy::record_user_requested_gc() {
  _user_requested_gcs++;
}

void ShenandoahCollectorPolicy::record_allocation_failure_gc() {
  _allocation_failure_gcs++;
}

bool ShenandoahCollectorPolicy::should_start_concurrent_mark(size_t used,
                                                             size_t capacity) {
  return _heuristics->should_start_concurrent_mark(used, capacity);
}

bool ShenandoahCollectorPolicy::handover_cancelled_marking() {
  return _heuristics->handover_cancelled_marking();
}

bool ShenandoahCollectorPolicy::handover_cancelled_uprefs() {
  return _heuristics->handover_cancelled_uprefs();
}

bool ShenandoahCollectorPolicy::update_refs_early() {
  return _heuristics->update_refs_early();
}

void ShenandoahCollectorPolicy::record_cm_success() {
  _heuristics->record_cm_success();
  _successful_cm++;
}

void ShenandoahCollectorPolicy::record_cm_degenerated() {
  _degenerated_cm++;
}

void ShenandoahCollectorPolicy::record_cm_cancelled() {
  _heuristics->record_cm_cancelled();
}

void ShenandoahCollectorPolicy::record_uprefs_success() {
  _heuristics->record_uprefs_success();
  _successful_uprefs++;
}

void ShenandoahCollectorPolicy::record_uprefs_degenerated() {
  _degenerated_uprefs++;
}

void ShenandoahCollectorPolicy::record_uprefs_cancelled() {
  _heuristics->record_uprefs_cancelled();
}

void ShenandoahCollectorPolicy::record_full_gc() {
  _heuristics->record_full_gc();
}

void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set, int* connections) {
  _heuristics->choose_collection_set(collection_set, connections);
}

void ShenandoahCollectorPolicy::choose_free_set(ShenandoahFreeSet* free_set) {
  _heuristics->choose_free_set(free_set);
}

bool ShenandoahCollectorPolicy::process_references() {
  return _heuristics->process_references();
}

bool ShenandoahCollectorPolicy::unload_classes() {
  return _heuristics->unload_classes();
}

void ShenandoahCollectorPolicy::print_tracing_info(outputStream* out) {
  out->cr();
  out->print_cr("GC STATISTICS:");
  out->print_cr("  \"(G)\" (gross) pauses include time to safepoint. \"(N)\" (net) pauses are times spent in GC.");
  out->print_cr("  \"a\" is average time for each phase; look at levels to see if average makes sense.");
  out->print_cr("  \"lvls\" are quantiles: 0%% (minimum), 25%%, 50%% (median), 75%%, 100%% (maximum).");
  out->cr();

  for (uint i = 0; i < _num_phases; i++) {
    if (_timing_data[i]._secs.maximum() != 0) {
      print_summary_sd(out, _phase_names[i], &(_timing_data[i]._secs));
    }
  }

  out->cr();
  out->print_cr("" SIZE_FORMAT " allocation failure and " SIZE_FORMAT " user requested GCs", _allocation_failure_gcs, _user_requested_gcs);
  out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated concurrent markings", _successful_cm, _degenerated_cm);
  out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated update references  ", _successful_uprefs, _degenerated_uprefs);
  out->cr();
}

void ShenandoahCollectorPolicy::print_summary_sd(outputStream* out, const char* str, const HdrSeq* seq) {
  out->print_cr("%-27s = %8.2lf s (a = %8.0lf us) (n = " INT32_FORMAT_W(5) ") (lvls, us = %8.0lf, %8.0lf, %8.0lf, %8.0lf, %8.0lf)",
          str,
          seq->sum(),
          seq->avg() * 1000000.0,
          seq->num(),
          seq->percentile(0)  * 1000000.0,
          seq->percentile(25) * 1000000.0,
          seq->percentile(50) * 1000000.0,
          seq->percentile(75) * 1000000.0,
          seq->maximum() * 1000000.0
  );
}

1086 
1087 void ShenandoahCollectorPolicy::increase_cycle_counter() {
1088   _cycle_counter++;
1089 }
1090 
1091 size_t ShenandoahCollectorPolicy::cycle_counter() const {
1092   return _cycle_counter;
1093 }
1094 
1095  ShenandoahPhaseTimes* ShenandoahCollectorPolicy::phase_times() {
1096   return _phase_times;
1097 }
1098 
1099 
1100 uint ShenandoahCollectorPolicy::calc_workers_for_java_threads(uint application_workers) {
1101   return (uint)(ShenandoahGCWorkerPerJavaThread * application_workers);
1102 }
1103 
1104 uint ShenandoahCollectorPolicy::calc_workers_for_live_set(size_t live_data) {
1105   return (uint)(live_data / HeapSizePerGCThread);
1106 }
1107 
1108 
uint ShenandoahCollectorPolicy::calc_default_active_workers(
                                                     uint total_workers,
                                                     uint min_workers,
                                                     uint active_workers,
                                                     uint application_workers,
                                                     uint workers_by_java_threads,
                                                     uint workers_by_liveset) {
  // If the user has turned off using a dynamic number of GC threads
  // or has requested a specific number, set the active number of
  // workers to all the workers.
  uint new_active_workers = total_workers;
  uint prev_active_workers = active_workers;
  uint active_workers_by_JT = 0;
  uint active_workers_by_liveset = 0;

  active_workers_by_JT = MAX2(workers_by_java_threads, min_workers);

  // Choose a number of GC threads based on the live set.
  active_workers_by_liveset =
      MAX2((uint) 2U, workers_by_liveset);

  uint max_active_workers =
    MAX2(active_workers_by_JT, active_workers_by_liveset);

  new_active_workers = MIN2(max_active_workers, new_active_workers);

  // Increase GC workers instantly but decrease them more
  // slowly.
  if (new_active_workers < prev_active_workers) {
    new_active_workers =
      MAX2(min_workers, (prev_active_workers + new_active_workers) / 2);
  }

  if (UseNUMA) {
    uint numa_groups = (uint)os::numa_get_groups_num();
    assert(numa_groups <= total_workers, "Not enough workers to cover all numa groups");
    new_active_workers = MAX2(new_active_workers, numa_groups);
  }

  // Check once more that the number of workers is within the limits.
  assert(min_workers <= total_workers, "Minimum workers not consistent with total workers");
  assert(new_active_workers >= min_workers, "Minimum workers not observed");
  assert(new_active_workers <= total_workers, "Total workers not observed");

  log_trace(gc, task)("ShenandoahCollectorPolicy::calc_default_active_workers() : "
     "active_workers(): " UINTX_FORMAT "  new_active_workers: " UINTX_FORMAT "  "
     "prev_active_workers: " UINTX_FORMAT "\n"
     " active_workers_by_JT: " UINTX_FORMAT "  active_workers_by_liveset: " UINTX_FORMAT,
     (uintx)active_workers, (uintx)new_active_workers, (uintx)prev_active_workers,
     (uintx)active_workers_by_JT, (uintx)active_workers_by_liveset);
  assert(new_active_workers > 0, "Always need at least 1");
  return new_active_workers;
}

/**
 * The initial marking phase also updates references of live objects from the
 * previous concurrent GC cycle, so we take both Java threads and the live set
 * into account.
 */
uint ShenandoahCollectorPolicy::calc_workers_for_init_marking(uint active_workers,
                                            uint application_workers) {

  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    assert(ParallelGCThreads > 0, "Always need at least 1");
    return ParallelGCThreads;
  } else {
    ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)ShenandoahHeap::heap()->collector_policy();
    size_t live_data = policy->_heuristics->bytes_in_cset();

    return calc_default_active_workers(ParallelGCThreads, (ParallelGCThreads > 1) ? 2 : 1,
      active_workers, application_workers,
      calc_workers_for_java_threads(application_workers),
      calc_workers_for_live_set(live_data));
  }
}

uint ShenandoahCollectorPolicy::calc_workers_for_conc_marking(uint active_workers,
                                            uint application_workers) {

  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    assert(ConcGCThreads > 0, "Always need at least 1");
    return ConcGCThreads;
  } else {
    return calc_default_active_workers(ConcGCThreads,
      (ConcGCThreads > 1 ? 2 : 1), active_workers,
      application_workers, calc_workers_for_java_threads(application_workers), 0);
  }
}

uint ShenandoahCollectorPolicy::calc_workers_for_final_marking(uint active_workers,
                                            uint application_workers) {

  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    assert(ParallelGCThreads > 0, "Always need at least 1");
    return ParallelGCThreads;
  } else {
    return calc_default_active_workers(ParallelGCThreads,
      (ParallelGCThreads > 1 ? 2 : 1), active_workers,
      application_workers, calc_workers_for_java_threads(application_workers), 0);
  }
}

// Calculate workers for concurrent evacuation (concurrent GC).
uint ShenandoahCollectorPolicy::calc_workers_for_conc_evacuation(uint active_workers,
                                            uint application_workers) {
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ConcGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    assert(ConcGCThreads > 0, "Always need at least 1");
    return ConcGCThreads;
  } else {
    return calc_workers_for_evacuation(false, // not a full GC
      ConcGCThreads, active_workers, application_workers);
  }
}

// Calculate workers for parallel evacuation (full GC).
uint ShenandoahCollectorPolicy::calc_workers_for_parallel_evacuation(uint active_workers,
                                            uint application_workers) {
  if (!UseDynamicNumberOfGCThreads ||
     (!FLAG_IS_DEFAULT(ParallelGCThreads) && !ForceDynamicNumberOfGCThreads)) {
    assert(ParallelGCThreads > 0, "Always need at least 1");
    return ParallelGCThreads;
  } else {
    return calc_workers_for_evacuation(true, // a full GC
      ParallelGCThreads, active_workers, application_workers);
  }
}

uint ShenandoahCollectorPolicy::calc_workers_for_evacuation(bool full_gc,
                                            uint total_workers,
                                            uint active_workers,
                                            uint application_workers) {

  // Calculation based on live set
  size_t live_data = 0;
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  if (full_gc) {
    ShenandoahHeapRegionSet* regions = heap->regions();
    for (size_t index = 0; index < regions->active_regions(); index ++) {
      live_data += regions->get_fast(index)->get_live_data_bytes();
    }
  } else {
    ShenandoahCollectorPolicy* policy = (ShenandoahCollectorPolicy*)heap->collector_policy();
    live_data = policy->_heuristics->bytes_in_cset();
  }

  uint active_workers_by_liveset = calc_workers_for_live_set(live_data);
  return calc_default_active_workers(total_workers,
      (total_workers > 1 ? 2 : 1), active_workers,
      application_workers, 0, active_workers_by_liveset);
}

bool ShenandoahCollectorPolicy::should_start_partial_gc() {
  return _heuristics->should_start_partial_gc();
}