/*
 * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "runtime/os.hpp"
#include "utilities/quickSort.hpp"

#define SHENANDOAH_ERGO_DISABLE_FLAG(name)                                  \
  do {                                                                      \
    if (FLAG_IS_DEFAULT(name) && (name)) {                                  \
      log_info(gc)("Heuristics ergonomically sets -XX:-" #name);            \
      FLAG_SET_DEFAULT(name, false);                                        \
    }                                                                       \
  } while (0)

#define SHENANDOAH_ERGO_ENABLE_FLAG(name)                                   \
  do {                                                                      \
    if (FLAG_IS_DEFAULT(name) && !(name)) {                                 \
      log_info(gc)("Heuristics ergonomically sets -XX:+" #name);            \
      FLAG_SET_DEFAULT(name, true);                                         \
    }                                                                       \
  } while (0)

#define SHENANDOAH_ERGO_OVERRIDE_DEFAULT(name, value)                       \
  do {                                                                      \
    if (FLAG_IS_DEFAULT(name)) {                                            \
      log_info(gc)("Heuristics ergonomically sets -XX:" #name "=" #value);  \
      FLAG_SET_DEFAULT(name, value);                                        \
    }                                                                       \
  } while (0)
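
// These macros adjust flags only while they are still at their built-in
// defaults: an explicit command-line setting always wins. For example,
// SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGarbageThreshold, 20) would
// rewrite the default to 20, but would leave a user-supplied
// -XX:ShenandoahGarbageThreshold=40 untouched.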

class ShenandoahHeuristics : public CHeapObj<mtGC> {
protected:
  bool _update_refs_early;
  bool _update_refs_adaptive;

  typedef struct {
    ShenandoahHeapRegion* _region;
    size_t _garbage;
    uint64_t _seqnum_last_alloc;
  } RegionData;

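  // C-style comparators for QuickSort::sort below: a negative result means
  // "a" sorts before "b". compare_by_garbage orders the most garbage-rich
  // regions first; the alloc-seq comparators order regions by allocation age.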
  static int compare_by_garbage(RegionData a, RegionData b) {
    if (a._garbage > b._garbage)
      return -1;
    else if (a._garbage < b._garbage)
      return 1;
    else return 0;
  }

  static int compare_by_alloc_seq_ascending(RegionData a, RegionData b) {
    if (a._seqnum_last_alloc == b._seqnum_last_alloc)
      return 0;
    else if (a._seqnum_last_alloc < b._seqnum_last_alloc)
      return -1;
    else return 1;
  }

  static int compare_by_alloc_seq_descending(RegionData a, RegionData b) {
    return -compare_by_alloc_seq_ascending(a, b);
  }

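  // Lazily allocated, grow-only scratch buffer for region candidates,
  // reused across GC cycles to avoid reallocating it on every cycle.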
  RegionData* get_region_data_cache(size_t num) {
    RegionData* res = _region_data;
    if (res == NULL) {
      res = NEW_C_HEAP_ARRAY(RegionData, num, mtGC);
      _region_data = res;
      _region_data_size = num;
    } else if (_region_data_size < num) {
      res = REALLOC_C_HEAP_ARRAY(RegionData, _region_data, num, mtGC);
      _region_data = res;
      _region_data_size = num;
    }
    return res;
  }

  RegionData* _region_data;
  size_t _region_data_size;

  typedef struct {
    ShenandoahHeapRegion* _region;
    size_t _connections;
  } RegionConnections;

  RegionConnections* get_region_connects_cache(size_t num) {
    RegionConnections* res = _region_connects;
    if (res == NULL) {
      res = NEW_C_HEAP_ARRAY(RegionConnections, num, mtGC);
      _region_connects = res;
      _region_connects_size = num;
    } else if (_region_connects_size < num) {
      res = REALLOC_C_HEAP_ARRAY(RegionConnections, _region_connects, num, mtGC);
      _region_connects = res;
      _region_connects_size = num;
    }
    return res;
  }

  static int compare_by_connects(RegionConnections a, RegionConnections b) {
    if (a._connections == b._connections)
      return 0;
    else if (a._connections < b._connections)
      return -1;
    else return 1;
  }

  RegionConnections* _region_connects;
  size_t _region_connects_size;

  uint _degenerated_cycles_in_a_row;
  uint _successful_cycles_in_a_row;

  size_t _bytes_in_cset;

  double _last_cycle_end;

public:

  ShenandoahHeuristics();
  virtual ~ShenandoahHeuristics();

  void record_gc_start() {
    ShenandoahHeap::heap()->set_alloc_seq_gc_start();
  }

  void record_gc_end() {
    ShenandoahHeap::heap()->set_alloc_seq_gc_end();
  }

  virtual void record_cycle_start() {
    // Do nothing
  }

  virtual void record_cycle_end() {
    _last_cycle_end = os::elapsedTime();
  }

  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
    // Do nothing
  }

  virtual void print_thresholds() {
  }

  virtual bool should_start_normal_gc() const = 0;

  virtual bool should_start_update_refs() {
    return _update_refs_early;
  }

  virtual bool update_refs() const {
    return _update_refs_early;
  }

  virtual ShenandoahHeap::GCCycleMode should_start_traversal_gc() {
    return ShenandoahHeap::NONE;
  }

  virtual bool can_do_traversal_gc() {
    return false;
  }

  virtual bool should_degenerate_cycle() {
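    // Degenerated cycles are tolerated while the streak stays within
    // ShenandoahFullGCThreshold; once the streak exceeds it, the next
    // failure upgrades to a full GC instead.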
    return _degenerated_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }

  virtual void record_success_concurrent() {
    _degenerated_cycles_in_a_row = 0;
    _successful_cycles_in_a_row++;
  }

  virtual void record_success_degenerated() {
    _degenerated_cycles_in_a_row++;
    _successful_cycles_in_a_row = 0;
  }

  virtual void record_success_full() {
    _degenerated_cycles_in_a_row = 0;
    _successful_cycles_in_a_row++;
  }

  virtual void record_allocation_failure_gc() {
    _bytes_in_cset = 0;
  }

  virtual void record_explicit_gc() {
    _bytes_in_cset = 0;
  }

  virtual void record_peak_occupancy() {
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);

  virtual bool should_process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Process references every Nth GC cycle.
    return cycle % ShenandoahRefProcFrequency == 0;
  }

  virtual bool should_unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Unload classes every Nth GC cycle.
    // This should not happen in the same cycle as process_references to amortize costs.
    // Offsetting by one is enough to break the rendezvous when periods are equal.
    // When periods are not equal, offsetting by one is just as good as any other guess.
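    // For example, with both frequencies set to 5, references are processed
    // on cycles 0, 5, 10, ..., while classes are unloaded on cycles 4, 9, 14, ...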
    return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
  }

  bool maybe_add_heap_region(ShenandoahHeapRegion* hr,
                             ShenandoahCollectionSet* cs);

  virtual const char* name() = 0;
  virtual bool is_diagnostic() = 0;
  virtual bool is_experimental() = 0;
  virtual void initialize() {}

protected:
  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                     RegionData* data, size_t data_size,
                                                     size_t free) = 0;
};

// Note: the initializer list matches the member declaration order above,
// which is the order members are actually initialized in.
ShenandoahHeuristics::ShenandoahHeuristics() :
  _update_refs_early(false),
  _update_refs_adaptive(false),
  _region_data(NULL),
  _region_data_size(0),
  _region_connects(NULL),
  _region_connects_size(0),
  _degenerated_cycles_in_a_row(0),
  _successful_cycles_in_a_row(0),
  _bytes_in_cset(0),
  _last_cycle_end(0)
{
  if (strcmp(ShenandoahUpdateRefsEarly, "on") == 0 ||
      strcmp(ShenandoahUpdateRefsEarly, "true") == 0 ) {
    _update_refs_early = true;
  } else if (strcmp(ShenandoahUpdateRefsEarly, "off") == 0 ||
             strcmp(ShenandoahUpdateRefsEarly, "false") == 0 ) {
    _update_refs_early = false;
  } else if (strcmp(ShenandoahUpdateRefsEarly, "adaptive") == 0) {
    _update_refs_adaptive = true;
    _update_refs_early = true;
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahUpdateRefsEarly option: %s", ShenandoahUpdateRefsEarly);
  }
}

ShenandoahHeuristics::~ShenandoahHeuristics() {
  if (_region_data != NULL) {
    FREE_C_HEAP_ARRAY(RegionData, _region_data);
  }
  if (_region_connects != NULL) {
    FREE_C_HEAP_ARRAY(RegionConnections, _region_connects);
  }
}

bool ShenandoahHeuristics::maybe_add_heap_region(ShenandoahHeapRegion* hr,
                                                 ShenandoahCollectionSet* collection_set) {
  if (hr->is_regular() && hr->has_live() && !collection_set->is_in(hr)) {
    collection_set->add_region(hr);
    return true;
  }
  return false;
}

void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
  assert(collection_set->count() == 0, "Must be empty");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.

  size_t num_regions = heap->num_regions();

  RegionData* candidates = get_region_data_cache(num_regions);

  size_t cand_idx = 0;

  size_t total_garbage = 0;

  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;

  size_t free = 0;
  size_t free_regions = 0;

  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);

    size_t garbage = region->garbage();
    total_garbage += garbage;

    if (region->is_empty()) {
      free_regions++;
      free += ShenandoahHeapRegion::region_size_bytes();
    } else if (region->is_regular()) {
      if (!region->has_live()) {
        // We can recycle it right away and put it in the free set.
        immediate_regions++;
        immediate_garbage += garbage;
        region->make_trash();
      } else {
        // This is our candidate for later consideration.
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._garbage = garbage;
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      // Reclaim humongous regions here, and count them as the immediate garbage
#ifdef ASSERT
      bool reg_live = region->has_live();
      bool bm_live = heap->is_marked_complete(oop(region->bottom() + BrooksPointer::word_size()));
      assert(reg_live == bm_live,
             "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT,
             BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words());
#endif
      if (!region->has_live()) {
        heap->trash_humongous_region_at(region);

        // Count only the start. Continuations would be counted on "trash" path
        immediate_regions++;
        immediate_garbage += garbage;
      }
    } else if (region->is_trash()) {
      // Count in just trashed collection set, during coalesced CM-with-UR
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  // Step 2. Look back at garbage statistics, and decide if we want to collect anything,
  // given the amount of immediately reclaimable garbage. If we do, figure out the collection set.
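  // If more than ShenandoahImmediateThreshold percent of the total garbage is
  // immediately reclaimable, building a collection set is not worth the
  // evacuation work: the cycle is satisfied by the regions trashed above.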

  assert (immediate_garbage <= total_garbage,
          "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "M vs " SIZE_FORMAT "M",
          immediate_garbage / M, total_garbage / M);

  size_t immediate_percent = total_garbage == 0 ? 0 : (immediate_garbage * 100 / total_garbage);

  if (immediate_percent <= ShenandoahImmediateThreshold) {
    choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
    collection_set->update_region_status();

    size_t cset_percent = total_garbage == 0 ? 0 : (collection_set->garbage() * 100 / total_garbage);
    log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                       collection_set->garbage() / M, cset_percent, collection_set->live_data() / M, collection_set->count());
  }

  log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions",
                     immediate_garbage / M, immediate_percent, immediate_regions);
}

void ShenandoahCollectorPolicy::record_gc_start() {
  _heuristics->record_gc_start();
}

void ShenandoahCollectorPolicy::record_gc_end() {
  _heuristics->record_gc_end();
}

class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahPassiveHeuristics() : ShenandoahHeuristics() {
    // Do not allow concurrent cycles.
    FLAG_SET_DEFAULT(ExplicitGCInvokesConcurrent, false);

    // Passive runs with max speed, and only reacts on allocation failure.
    FLAG_SET_DEFAULT(ShenandoahPacing, false);

    // Disable known barriers by default.
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahKeepAliveBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahWriteBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahReadBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValReadBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahAcmpBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(UseShenandoahMatrix);
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t free) {
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > 0) {
        cset->add_region(r);
      }
    }
  }

  virtual bool should_start_normal_gc() const {
    // Never do concurrent GCs.
    return false;
  }

  virtual bool should_process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    // Always process references.
    return true;
  }

  virtual bool should_unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    // Always unload classes.
    return true;
  }

  virtual bool should_degenerate_cycle() {
    // Always fail to Full GC
    return false;
  }

  virtual const char* name() {
    return "passive";
  }

  virtual bool is_diagnostic() {
    return true;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahAggressiveHeuristics() : ShenandoahHeuristics() {
    // Do not shortcut evacuation
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold, 100);

    // Aggressive runs with max speed for allocation, to capture races against the mutator
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahPacing);
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t free) {
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > 0) {
        cset->add_region(r);
      }
    }
  }

  virtual bool should_start_normal_gc() const {
    return true;
  }

  virtual bool should_process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    // Randomly process refs with 50% chance.
    return (os::random() & 1) == 1;
  }

  virtual bool should_unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    // Randomly unload classes with 50% chance.
    return (os::random() & 1) == 1;
  }

  virtual const char* name() {
    return "aggressive";
  }

  virtual bool is_diagnostic() {
    return true;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahStaticHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahStaticHeuristics() : ShenandoahHeuristics() {
    // Static heuristics may degrade to continuous back-to-back cycles if live data
    // is larger than the free threshold. ShenandoahAllocationThreshold is supposed
    // to break this, but it only works when it is non-zero.
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahAllocationThreshold, 1);
  }

  void print_thresholds() {
    log_info(gc, init)("Shenandoah heuristics thresholds: allocation " SIZE_FORMAT ", free " SIZE_FORMAT ", garbage " SIZE_FORMAT,
                       ShenandoahAllocationThreshold,
                       ShenandoahFreeThreshold,
                       ShenandoahGarbageThreshold);
  }

  virtual ~ShenandoahStaticHeuristics() {}

  virtual bool should_start_normal_gc() const {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    size_t capacity = heap->capacity();
    size_t available = heap->free_set()->available();
    size_t threshold_available = (capacity * ShenandoahFreeThreshold) / 100;
    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
    size_t bytes_allocated = heap->bytes_allocated_since_gc_start();

    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);

    if (available < threshold_available &&
        bytes_allocated > threshold_bytes_allocated) {
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      log_info(gc, ergo)("Concurrent marking triggered. Free: " SIZE_FORMAT "M, Free Threshold: " SIZE_FORMAT
                         "M; Allocated: " SIZE_FORMAT "M, Alloc Threshold: " SIZE_FORMAT "M",
                         available / M, threshold_available / M, bytes_allocated / M, threshold_bytes_allocated / M);
      return true;
    } else if (periodic_gc) {
      log_info(gc, ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                         last_time_ms, ShenandoahGuaranteedGCInterval);
      return true;
    }

    return false;
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t free) {
    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;

    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > threshold) {
        cset->add_region(r);
      }
    }
  }

  virtual const char* name() {
    return "static";
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahCompactHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahCompactHeuristics() : ShenandoahHeuristics() {
    SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahUncommit);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahAllocationThreshold,  10);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold,   100);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUncommitDelay,        5000);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGuaranteedGCInterval, 30000);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGarbageThreshold,     20);
  }

  virtual bool should_start_normal_gc() const {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    size_t available = heap->free_set()->available();
    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
    size_t bytes_allocated = heap->bytes_allocated_since_gc_start();
    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;

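    // Trigger when free space runs below the allocation-threshold fraction of
    // the heap, or when enough new allocation has happened since the last GC.
    // Note both comparisons reuse the same threshold, which keeps roughly an
    // allocation-threshold worth of headroom free.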
    if (available < threshold_bytes_allocated || bytes_allocated > threshold_bytes_allocated) {
      log_info(gc, ergo)("Concurrent marking triggered. Free: " SIZE_FORMAT "M, Allocated: " SIZE_FORMAT "M, Alloc Threshold: " SIZE_FORMAT "M",
                         available / M, bytes_allocated / M, threshold_bytes_allocated / M);
      return true;
    } else if (periodic_gc) {
      log_info(gc, ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                         last_time_ms, ShenandoahGuaranteedGCInterval);
      return true;
    }

    return false;
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t actual_free) {
    // Do not select a CSet so large that evacuating it would overflow the available free space
    size_t max_cset = actual_free * 3 / 4;

    log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M",
                       actual_free / M, max_cset / M);

    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;

    size_t live_cset = 0;
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      size_t new_cset = live_cset + r->get_live_data_bytes();
      if (new_cset < max_cset && r->garbage() > threshold) {
        live_cset = new_cset;
        cset->add_region(r);
      }
    }
  }

  virtual const char* name() {
    return "compact";
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
private:
  uintx _free_threshold;
  size_t _peak_occupancy;
  TruncatedSeq* _cycle_gap_history;
  TruncatedSeq* _conc_mark_duration_history;
  TruncatedSeq* _conc_uprefs_duration_history;
public:
  ShenandoahAdaptiveHeuristics() :
    ShenandoahHeuristics(),
    _free_threshold(ShenandoahInitFreeThreshold),
    _peak_occupancy(0),
    _cycle_gap_history(new TruncatedSeq(5)),
    _conc_mark_duration_history(new TruncatedSeq(5)),
    _conc_uprefs_duration_history(new TruncatedSeq(5)) {
  }

  virtual ~ShenandoahAdaptiveHeuristics() {}

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t actual_free) {
    size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;

    // The logic for cset selection in adaptive is as follows:
    //
    //   1. We cannot get a cset larger than the available free space. Otherwise we guarantee OOME
    //      during evacuation, and thus guarantee full GC. In practice, we also want to let
    //      the application allocate something. This is why we limit the CSet to some fraction of
    //      available space. In a non-overloaded heap, max_cset would contain all plausible candidates
    //      over the garbage threshold.
    //
    //   2. We should not get a cset so small that the free threshold would not be met right
    //      after the cycle. Otherwise we get back-to-back cycles for no reason if the heap is
    //      too fragmented. In a non-overloaded, non-fragmented heap min_cset would be around zero.
    //
    // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates
    // until we meet min_cset. Then we add all candidates that fit the garbage threshold until
    // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme,
    // ShenandoahGarbageThreshold is a soft threshold, which is ignored until min_cset is met.
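    //
    // Worked example (hypothetical numbers): with a 1024M heap, _free_threshold = 10
    // and MaxNormalStep = 5, free_target is 15% of the heap (~153M). If actual_free
    // is 100M, then min_cset ~ 53M and max_cset = 75M: the first ~53M of live data
    // is taken unconditionally, the rest only from regions over the garbage threshold.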

    size_t free_target = MIN2<size_t>(_free_threshold + MaxNormalStep, 100) * ShenandoahHeap::heap()->capacity() / 100;
    size_t min_cset = free_target > actual_free ? (free_target - actual_free) : 0;
    size_t max_cset = actual_free * 3 / 4;
    min_cset = MIN2(min_cset, max_cset);

    log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: "
                       SIZE_FORMAT "M, Target CSet: [" SIZE_FORMAT "M, " SIZE_FORMAT "M]",
                       free_target / M, actual_free / M, min_cset / M, max_cset / M);

    // Better select garbage-first regions
    QuickSort::sort<RegionData>(data, (int)size, compare_by_garbage, false);

    size_t live_cset = 0;
    _bytes_in_cset = 0;
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;

      size_t new_cset = live_cset + r->get_live_data_bytes();

      if (new_cset < min_cset) {
        cset->add_region(r);
        _bytes_in_cset += r->used();
        live_cset = new_cset;
      } else if (new_cset <= max_cset) {
        if (r->garbage() > garbage_threshold) {
          cset->add_region(r);
          _bytes_in_cset += r->used();
          live_cset = new_cset;
        }
      } else {
        break;
      }
    }
  }

  static const intx MaxNormalStep = 5;      // max step towards goal under normal conditions
  static const intx DegeneratedGC_Hit = 10; // how much to step on degenerated GC
  static const intx AllocFailure_Hit = 20;  // how much to step on allocation failure full GC
  static const intx UserRequested_Hit = 0;  // how much to step on user requested full GC

  void handle_cycle_success() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t capacity = heap->capacity();

    size_t current_threshold = (capacity - _peak_occupancy) * 100 / capacity;
    size_t min_threshold = ShenandoahMinFreeThreshold;
    // Compute the step in signed arithmetic: current_threshold may well
    // exceed min_threshold, and the difference must not wrap around.
    intx step = (intx)min_threshold - (intx)current_threshold;
    step = MAX2(step, (intx) -MaxNormalStep);
    step = MIN2(step, (intx) MaxNormalStep);
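    // For example (hypothetical numbers): with a 1000M heap and 950M peak
    // occupancy, current_threshold is 5%. With ShenandoahMinFreeThreshold at
    // 10, step = +5: the cycle left less free space than the minimum, so the
    // free threshold gets pessimized by the full MaxNormalStep.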

    log_info(gc, ergo)("Capacity: " SIZE_FORMAT "M, Peak Occupancy: " SIZE_FORMAT
                       "M, Lowest Free: " SIZE_FORMAT "M, Free Threshold: " UINTX_FORMAT "M",
                       capacity / M, _peak_occupancy / M,
                       (capacity - _peak_occupancy) / M, ShenandoahMinFreeThreshold * capacity / 100 / M);

    if (step > 0) {
      // Pessimize
      adjust_free_threshold(step);
    } else if (step < 0) {
      // Optimize, if enough happy cycles happened
      if (_successful_cycles_in_a_row > ShenandoahHappyCyclesThreshold &&
          _free_threshold > 0) {
        adjust_free_threshold(step);
        _successful_cycles_in_a_row = 0;
      }
    } else {
      // do nothing
    }
    _peak_occupancy = 0;
  }

  void record_cycle_start() {
    ShenandoahHeuristics::record_cycle_start();
    double last_cycle_gap = os::elapsedTime() - _last_cycle_end;
    _cycle_gap_history->add(last_cycle_gap);
  }

  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
    if (phase == ShenandoahPhaseTimings::conc_mark) {
      _conc_mark_duration_history->add(secs);
    } else if (phase == ShenandoahPhaseTimings::conc_update_refs) {
      _conc_uprefs_duration_history->add(secs);
    } // Else ignore
  }

  void adjust_free_threshold(intx adj) {
    intx new_value = adj + _free_threshold;
    uintx new_threshold = (uintx)MAX2<intx>(new_value, 0);
    new_threshold = MAX2(new_threshold, ShenandoahMinFreeThreshold);
    new_threshold = MIN2(new_threshold, ShenandoahMaxFreeThreshold);
    if (new_threshold != _free_threshold) {
      _free_threshold = new_threshold;
      log_info(gc, ergo)("Adjusting free threshold to: " UINTX_FORMAT "%% (" SIZE_FORMAT "M)",
                         _free_threshold, _free_threshold * ShenandoahHeap::heap()->capacity() / 100 / M);
    }
  }

  virtual void record_success_concurrent() {
    ShenandoahHeuristics::record_success_concurrent();
    handle_cycle_success();
  }

  virtual void record_success_degenerated() {
    ShenandoahHeuristics::record_success_degenerated();
    adjust_free_threshold(DegeneratedGC_Hit);
  }

  virtual void record_success_full() {
    ShenandoahHeuristics::record_success_full();
    adjust_free_threshold(AllocFailure_Hit);
  }

  virtual void record_explicit_gc() {
    ShenandoahHeuristics::record_explicit_gc();
    adjust_free_threshold(UserRequested_Hit);
  }

  virtual void record_peak_occupancy() {
    _peak_occupancy = MAX2(_peak_occupancy, ShenandoahHeap::heap()->used());
  }

  virtual bool should_start_normal_gc() const {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t capacity = heap->capacity();
    size_t available = heap->free_set()->available();

    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
    size_t threshold_available = (capacity * _free_threshold) / 100;
    size_t bytes_allocated = heap->bytes_allocated_since_gc_start();
    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;

    if (available < threshold_available &&
        bytes_allocated > threshold_bytes_allocated) {
      log_info(gc, ergo)("Concurrent marking triggered. Free: " SIZE_FORMAT "M, Free Threshold: " SIZE_FORMAT
                         "M; Allocated: " SIZE_FORMAT "M, Alloc Threshold: " SIZE_FORMAT "M",
                         available / M, threshold_available / M, bytes_allocated / M, threshold_bytes_allocated / M);
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      return true;
    } else if (periodic_gc) {
      log_info(gc, ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                         last_time_ms, ShenandoahGuaranteedGCInterval);
      return true;
    }

    return false;
  }

  virtual bool should_start_update_refs() {
    if (!_update_refs_adaptive) {
      return _update_refs_early;
    }

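    // The two thresholds give the switch some hysteresis: early update-refs is
    // abandoned once the concurrent phases take more than the MinGap fraction
    // of the inter-cycle gap, and is re-enabled only when they drop back under
    // the MaxGap fraction.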
    double cycle_gap_avg = _cycle_gap_history->avg();
    double conc_mark_avg = _conc_mark_duration_history->avg();
    double conc_uprefs_avg = _conc_uprefs_duration_history->avg();

    if (_update_refs_early) {
      double threshold = ShenandoahMergeUpdateRefsMinGap / 100.0;
      if (conc_mark_avg + conc_uprefs_avg > cycle_gap_avg * threshold) {
        _update_refs_early = false;
      }
    } else {
      double threshold = ShenandoahMergeUpdateRefsMaxGap / 100.0;
      if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) {
        _update_refs_early = true;
      }
    }
    return _update_refs_early;
  }

  virtual const char* name() {
    return "adaptive";
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahTraversalHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahTraversalHeuristics() : ShenandoahHeuristics() {
    FLAG_SET_DEFAULT(UseShenandoahMatrix,              false);
    FLAG_SET_DEFAULT(ShenandoahSATBBarrier,            false);
    FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier,    false);
    FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true);
    FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier,       false);
    FLAG_SET_DEFAULT(ShenandoahBarriersForConst,       true);
    FLAG_SET_DEFAULT(ShenandoahWriteBarrierRB,         false);
    FLAG_SET_DEFAULT(ShenandoahAllocImplicitLive,      false);
    FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs,       false);
    FLAG_SET_DEFAULT(ShenandoahRecycleClearsBitmap,    true);

    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahRefProcFrequency, 1);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUnloadClassesFrequency, 1);
  }

  virtual bool should_start_normal_gc() const {
    return false;
  }

  virtual bool is_experimental() {
    return true;
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool can_do_traversal_gc() {
    return true;
  }

  virtual const char* name() {
    return "traversal";
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    // No root regions in this mode.
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions();
    root_regions->clear();

    ShenandoahHeapRegionSet* traversal_set = traversal_gc->traversal_set();
    traversal_set->clear();
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      assert(!collection_set->is_in(r), "must not yet be in cset");
      if (r->is_regular() && r->used() > 0) {
        size_t garbage_percent = r->garbage() * 100 / ShenandoahHeapRegion::region_size_bytes();
        if (garbage_percent > ShenandoahGarbageThreshold) {
          collection_set->add_region(r);
        }
      }
      r->clear_live_data();
      traversal_set->add_region(r);
    }
    collection_set->update_region_status();
  }

  virtual ShenandoahHeap::GCCycleMode should_start_traversal_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->has_forwarded_objects()) return ShenandoahHeap::NONE;

    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
    if (periodic_gc) {
      log_info(gc, ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                         last_time_ms, ShenandoahGuaranteedGCInterval);
      return ShenandoahHeap::MAJOR;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
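    // Trigger a major traversal cycle once the free fraction of the heap
    // drops below ShenandoahFreeThreshold percent.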
    return 100 - (used * 100 / capacity) < ShenandoahFreeThreshold ? ShenandoahHeap::MAJOR : ShenandoahHeap::NONE;
  }

protected:
  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                     RegionData* data, size_t data_size,
                                                     size_t free) {
    ShouldNotReachHere();
  }
};

class ShenandoahPartialHeuristics : public ShenandoahTraversalHeuristics {
protected:
  size_t* _from_idxs;

  bool is_minor_gc() const { return ShenandoahHeap::heap()->is_minor_gc(); }

  // Utility method to remove any cset regions from root set and
  // add all cset regions to the traversal set.
  void filter_regions() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    size_t num_regions = heap->num_regions();
    ShenandoahCollectionSet* collection_set = heap->collection_set();
    ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions();
    ShenandoahHeapRegionSet* traversal_set = traversal_gc->traversal_set();
    traversal_set->clear();

    for (size_t i = 0; i < num_regions; i++) {
      ShenandoahHeapRegion* region = heap->get_region(i);
      if (collection_set->is_in(i)) {
        if (root_regions->is_in(i)) {
          root_regions->remove_region(region);
        }
        traversal_set->add_region_check_for_duplicates(region);
        assert(traversal_set->is_in(i), "must be in traversal set now");
      }
    }
  }

public:
  ShenandoahPartialHeuristics() :
    ShenandoahTraversalHeuristics(),
    _from_idxs(NULL) {

    FLAG_SET_DEFAULT(UseShenandoahMatrix, true);

    // TODO: Disable this optimization for now, as it also requires the matrix barriers.
#ifdef COMPILER2
    FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
#endif
  }

  void initialize() {
    _from_idxs = NEW_C_HEAP_ARRAY(size_t, ShenandoahHeap::heap()->num_regions(), mtGC);
  }

  virtual ~ShenandoahPartialHeuristics() {
    // _from_idxs is only allocated if initialize() was called.
    if (_from_idxs != NULL) {
      FREE_C_HEAP_ARRAY(size_t, _from_idxs);
    }
  }

  bool should_start_update_refs() {
    return false;
  }

  bool update_refs() const {
    return false;
  }

  virtual bool should_unload_classes() {
    return ShenandoahUnloadClassesFrequency != 0;
  }

  virtual bool should_process_references() {
    return ShenandoahRefProcFrequency != 0;
  }

  bool should_start_normal_gc() const {
    return false;
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return true;
  }
};

class ShenandoahPartialConnectedHeuristics : public ShenandoahPartialHeuristics {
public:
  virtual const char* name() {
    return "connectedness";
  }

  ShenandoahHeap::GCCycleMode should_start_traversal_gc() {
    ShenandoahHeap::GCCycleMode cycle_mode = ShenandoahPartialHeuristics::should_start_traversal_gc();
    if (cycle_mode != ShenandoahHeap::NONE) {
      return cycle_mode;
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->has_forwarded_objects()) {
      // Cannot start partial if heap is not completely updated.
      return ShenandoahHeap::NONE;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return ShenandoahHeap::NONE;
    }

    size_t threshold = heap->capacity() * ShenandoahConnectednessPercentage / 100;
    size_t allocated = used - prev_used;
    bool result = allocated > threshold;

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
                       result ? "Partial cycle triggered" : "Partial cycle skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M);

    if (result) {
      log_info(gc, ergo)("%s", msg.buffer());
    } else {
      log_trace(gc, ergo)("%s", msg.buffer());
    }
    return result ? ShenandoahHeap::MINOR : ShenandoahHeap::NONE;
  }

  void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    if (!is_minor_gc()) {
      return ShenandoahPartialHeuristics::choose_collection_set(collection_set);
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions();
    root_regions->clear();
    size_t num_regions = heap->num_regions();

    RegionConnections* connects = get_region_connects_cache(num_regions);
    size_t connect_cnt = 0;

    for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
      ShenandoahHeapRegion* region = heap->get_region(to_idx);
      if (!region->is_regular()) continue;

      uint count = matrix->count_connected_to(to_idx, num_regions);
      if (count < ShenandoahPartialInboundThreshold) {
        connects[connect_cnt]._region = region;
        connects[connect_cnt]._connections = count;
        connect_cnt++;
      }
    }

    QuickSort::sort<RegionConnections>(connects, (int)connect_cnt, compare_by_connects, false);

    // Heuristics triggered partial when allocated was larger than a threshold.
    // New allocations might have happened while we were preparing for GC,
    // so capture them all in this cycle. This "adjusts" the threshold automatically.
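    // required_regions(used - prev_used) estimates how many regions worth of
    // new allocation happened; capping the cset target to that keeps the cycle
    // proportional to the recent allocation burst.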
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN3(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions, connect_cnt);

    for (size_t c = 0; c < target; c++) {
      assert(c == 0 || connects[c]._connections >= connects[c-1]._connections, "monotonicity");

      ShenandoahHeapRegion* region = connects[c]._region;
      size_t to_idx = region->region_number();
      assert(region->is_regular(), "filtered before");
      assert(!heap->region_in_collection_set(to_idx), "must not be in cset yet");

      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(to_idx, num_regions,
                                         _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        maybe_add_heap_region(region, collection_set);
        for (size_t i = 0; i < from_idx_count; i++) {
          ShenandoahHeapRegion* r = heap->get_region(_from_idxs[i]);
          root_regions->add_region_check_for_duplicates(r);
        }
      }
    }
    filter_regions();
    collection_set->update_region_status();
  }
};

class ShenandoahGenerationalPartialHeuristics : public ShenandoahPartialHeuristics {
public:

  ShenandoahGenerationalPartialHeuristics() : ShenandoahPartialHeuristics() {
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahPartialInboundThreshold, 100);
  }

  virtual const char* name() {
    return "generational";
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    if (!is_minor_gc()) {
      return ShenandoahPartialHeuristics::choose_collection_set(collection_set);
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    uint64_t alloc_seq_at_last_gc_end = heap->alloc_seq_at_last_gc_end();

    size_t num_regions = heap->num_regions();

    RegionData* candidates = get_region_data_cache(num_regions);

    for (size_t i = 0; i < num_regions; i++) {
      candidates[i]._region = heap->get_region(i);
      candidates[i]._seqnum_last_alloc = heap->get_region(i)->seqnum_last_alloc();
    }

    QuickSort::sort<RegionData>(candidates, (int)num_regions, compare_by_alloc_seq_descending, false);

    // Heuristics triggered partial when allocated was larger than a threshold.
    // New allocations might have happened while we were preparing for GC,
    // so capture them all in this cycle. This "adjusts" the threshold automatically.
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions);

    ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions();
    root_regions->clear();

    uint count = 0;

    for (uint i = 0; (i < num_regions) && (count < target); i++) {
      ShenandoahHeapRegion* contender = candidates[i]._region;
      if (contender->seqnum_last_alloc() <= alloc_seq_at_last_gc_end) {
        break;
      }

      size_t index = contender->region_number();
      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(index, num_regions, _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        if (maybe_add_heap_region(contender, collection_set)) {
          count++;
        }

        for (uint f = 0; f < from_idx_count; f++) {
          ShenandoahHeapRegion* r = heap->get_region(_from_idxs[f]);
          root_regions->add_region_check_for_duplicates(r);
        }
      }
    }
    filter_regions();
    collection_set->update_region_status();

    log_info(gc, ergo)("Regions: Max: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
                       num_regions, target, ShenandoahGenerationalYoungGenPercentage, collection_set->count());
  }

  ShenandoahHeap::GCCycleMode should_start_traversal_gc() {
    ShenandoahHeap::GCCycleMode cycle_mode = ShenandoahPartialHeuristics::should_start_traversal_gc();
    if (cycle_mode != ShenandoahHeap::NONE) {
      return cycle_mode;
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->has_forwarded_objects()) {
      // Cannot start partial if heap is not completely updated.
      return ShenandoahHeap::NONE;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return ShenandoahHeap::NONE;
    }

    size_t threshold = heap->capacity() * ShenandoahGenerationalYoungGenPercentage / 100;
    size_t allocated = used - prev_used;

    // Start the next young gc after we've allocated percentage_young of the heap.
    bool result = allocated > threshold;

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
                       result ? "Partial cycle triggered" : "Partial cycle skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M);

    if (result) {
      log_info(gc, ergo)("%s", msg.buffer());
    } else {
      log_trace(gc, ergo)("%s", msg.buffer());
    }
    return result ? ShenandoahHeap::MINOR : ShenandoahHeap::NONE;
  }
};

class ShenandoahLRUPartialHeuristics : public ShenandoahPartialHeuristics {
public:
  ShenandoahLRUPartialHeuristics() : ShenandoahPartialHeuristics() {
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahPartialInboundThreshold, 100);
  }

  virtual const char* name() {
    return "LRU";
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    if (!is_minor_gc()) {
      return ShenandoahPartialHeuristics::choose_collection_set(collection_set);
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start();

    size_t num_regions = heap->num_regions();

    RegionData* candidates = get_region_data_cache(num_regions);
    size_t candidate_idx = 0;
    for (size_t i = 0; i < num_regions; i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      if (r->is_regular() && (r->seqnum_last_alloc() > 0)) {
        candidates[candidate_idx]._region = r;
        candidates[candidate_idx]._seqnum_last_alloc = r->seqnum_last_alloc();
        candidate_idx++;
      }
    }

    size_t sorted_count = candidate_idx;
    QuickSort::sort<RegionData>(candidates, (int)sorted_count, compare_by_alloc_seq_ascending, false);

    // Heuristics triggered partial when allocated was larger than a threshold.
    // New allocations might have happened while we were preparing for GC,
    // so capture them all in this cycle. This "adjusts" the threshold automatically.
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), sorted_count);

    ShenandoahHeapRegionSet* root_regions = traversal_gc->root_regions();
    root_regions->clear();

    uint count = 0;

    for (uint i = 0; (i < sorted_count) && (count < target); i++) {
      ShenandoahHeapRegion* contender = candidates[i]._region;
      if (contender->seqnum_last_alloc() >= alloc_seq_at_last_gc_start) {
        break;
      }

      size_t index = contender->region_number();
      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(index, num_regions, _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        if (maybe_add_heap_region(contender, collection_set)) {
          count++;
        }
        for (uint f = 0; f < from_idx_count; f++) {
          ShenandoahHeapRegion* r = heap->get_region(_from_idxs[f]);
          root_regions->add_region_check_for_duplicates(r);
        }
      }
    }
    filter_regions();
    collection_set->update_region_status();

    log_info(gc, ergo)("Regions: Max: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
                       num_regions, target, ShenandoahLRUOldGenPercentage, collection_set->count());
  }
1345 
1346   ShenandoahHeap::GCCycleMode should_start_traversal_gc() {
1347     ShenandoahHeap::GCCycleMode cycle_mode = ShenandoahPartialHeuristics::should_start_traversal_gc();
1348     if (cycle_mode != ShenandoahHeap::NONE) {
1349       return cycle_mode;
1350     }
1351 
1352     ShenandoahHeap* heap = ShenandoahHeap::heap();
1353 
1354     if (heap->has_forwarded_objects()) {
1355       // Cannot start partial if heap is not completely updated.
1356       return ShenandoahHeap::NONE;
1357     }
1358 
1359     size_t capacity  = heap->capacity();
1360     size_t used      = heap->used();
1361     size_t prev_used = heap->used_at_last_gc();
1362 
1363     if (used < prev_used) {
1364       // Major collection must have happened, "used" data is unreliable, wait for update.
1365       return ShenandoahHeap::NONE;
1366     }
1367 
1368     // For now don't start until we are 40% full
1369     size_t allocated = used - prev_used;
1370     size_t threshold = heap->capacity() * ShenandoahLRUOldGenPercentage / 100;
1371     size_t minimum   = heap->capacity() * 0.4;
1372 
1373     bool result = ((used > minimum) && (allocated > threshold));
1374 
1375     FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
1376                        "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M, Minimum: " SIZE_FORMAT "M",
1377                        result ? "Partial cycle triggered" : "Partial cycle skipped",
1378                        capacity/M, used/M, prev_used/M, allocated/M, threshold/M, minimum/M);
1379 
1380     if (result) {
1381       log_info(gc,ergo)("%s", msg.buffer());
1382     } else {
1383       log_trace(gc,ergo)("%s", msg.buffer());
1384     }
1385     return result ? ShenandoahHeap::MINOR : ShenandoahHeap::NONE;
1386    }
1387 
1388 };
1389 
1390 ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
1391   _cycle_counter(0),
1392   _success_concurrent_gcs(0),
1393   _success_degenerated_gcs(0),
1394   _success_full_gcs(0),
1395   _explicit_concurrent(0),
1396   _explicit_full(0),
1397   _alloc_failure_degenerated(0),
1398   _alloc_failure_full(0),
1399   _alloc_failure_degenerated_upgrade_to_full(0)
1400 {
1401   Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahHeap::_DEGENERATED_LIMIT);
1402 
1403   ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());
1404 
1405   initialize_all();
1406 
1407   _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
1408 
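  // Pick the heuristics implementation named by -XX:ShenandoahGCHeuristics.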
1409   if (ShenandoahGCHeuristics != NULL) {
1410     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
1411       _heuristics = new ShenandoahAggressiveHeuristics();
1412     } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
1413       _heuristics = new ShenandoahStaticHeuristics();
1414     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
1415       _heuristics = new ShenandoahAdaptiveHeuristics();
1416     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
1417       _heuristics = new ShenandoahPassiveHeuristics();
1418     } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
1419       _heuristics = new ShenandoahCompactHeuristics();
1420     } else if (strcmp(ShenandoahGCHeuristics, "connected") == 0) {
1421       _heuristics = new ShenandoahPartialConnectedHeuristics();
1422     } else if (strcmp(ShenandoahGCHeuristics, "generational") == 0) {
1423       _heuristics = new ShenandoahGenerationalPartialHeuristics();
1424     } else if (strcmp(ShenandoahGCHeuristics, "LRU") == 0) {
1425       _heuristics = new ShenandoahLRUPartialHeuristics();
1426     } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
1427       _heuristics = new ShenandoahTraversalHeuristics();
1428     } else {
1429       vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
1430     }
1431 
1432     if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
1433       vm_exit_during_initialization(
1434               err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
1435                       _heuristics->name()));
1436     }
1437     if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
1438       vm_exit_during_initialization(
1439               err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
1440                       _heuristics->name()));
1441     }
1442 
1443     if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
1444       vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
1445     }
    log_info(gc, init)("Shenandoah heuristics: %s", _heuristics->name());
1448     _heuristics->print_thresholds();
1449   } else {
    ShouldNotReachHere();
1451   }
1452 }
1453 
1454 ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
1455   return this;
1456 }
1457 
1458 BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
1459   return BarrierSet::Shenandoah;
1460 }
1461 
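// Shenandoah routes allocations through ShenandoahHeap rather than through the
// CollectorPolicy hooks below, so reaching either of them indicates a bug.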
1462 HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
1463                                                        bool is_tlab,
1464                                                        bool* gc_overhead_limit_was_exceeded) {
1465   guarantee(false, "Not using this policy feature yet.");
1466   return NULL;
1467 }
1468 
1469 HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
1470   guarantee(false, "Not using this policy feature yet.");
1471   return NULL;
1472 }
1473 
void ShenandoahCollectorPolicy::initialize_alignments() {
  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
1477   _space_alignment = ShenandoahHeapRegion::region_size_bytes();
1478   _heap_alignment = ShenandoahHeapRegion::region_size_bytes();
1479 }
1480 
1481 void ShenandoahCollectorPolicy::post_heap_initialize() {
1482   _heuristics->initialize();
1483 }
1484 
1485 void ShenandoahCollectorPolicy::record_explicit_to_concurrent() {
1486   _heuristics->record_explicit_gc();
1487   _explicit_concurrent++;
1488 }
1489 
1490 void ShenandoahCollectorPolicy::record_explicit_to_full() {
1491   _heuristics->record_explicit_gc();
1492   _explicit_full++;
1493 }
1494 
1495 void ShenandoahCollectorPolicy::record_alloc_failure_to_full() {
1496   _heuristics->record_allocation_failure_gc();
1497   _alloc_failure_full++;
1498 }
1499 
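// Degenerated cycles are recorded together with the point in the cycle where
// degeneration happened; print_gc_stats() reports the per-point breakdown.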
1500 void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point) {
1501   assert(point < ShenandoahHeap::_DEGENERATED_LIMIT, "sanity");
1502   _heuristics->record_allocation_failure_gc();
1503   _alloc_failure_degenerated++;
1504   _degen_points[point]++;
1505 }
1506 
1507 void ShenandoahCollectorPolicy::record_degenerated_upgrade_to_full() {
1508   _alloc_failure_degenerated_upgrade_to_full++;
1509 }
1510 
1511 void ShenandoahCollectorPolicy::record_success_concurrent() {
1512   _heuristics->record_success_concurrent();
1513   _success_concurrent_gcs++;
1514 }
1515 
1516 void ShenandoahCollectorPolicy::record_success_degenerated() {
1517   _heuristics->record_success_degenerated();
1518   _success_degenerated_gcs++;
1519 }
1520 
1521 void ShenandoahCollectorPolicy::record_success_full() {
1522   _heuristics->record_success_full();
1523   _success_full_gcs++;
1524 }
1525 
1526 bool ShenandoahCollectorPolicy::should_start_normal_gc() {
1527   return _heuristics->should_start_normal_gc();
1528 }
1529 
1530 bool ShenandoahCollectorPolicy::should_degenerate_cycle() {
1531   return _heuristics->should_degenerate_cycle();
1532 }
1533 
1534 bool ShenandoahCollectorPolicy::update_refs() {
1535   return _heuristics->update_refs();
1536 }
1537 
1538 bool ShenandoahCollectorPolicy::should_start_update_refs() {
1539   return _heuristics->should_start_update_refs();
1540 }
1541 
1542 void ShenandoahCollectorPolicy::record_peak_occupancy() {
1543   _heuristics->record_peak_occupancy();
1544 }
1545 
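// Note that the 'minor' flag is currently unused: heuristics choose the
// collection set the same way for both cycle modes.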
1546 void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set,
1547                                                       bool minor) {
1548   _heuristics->choose_collection_set(collection_set);
1549 }
1550 
1551 bool ShenandoahCollectorPolicy::should_process_references() {
1552   return _heuristics->should_process_references();
1553 }
1554 
1555 bool ShenandoahCollectorPolicy::should_unload_classes() {
1556   return _heuristics->should_unload_classes();
1557 }
1558 
1559 size_t ShenandoahCollectorPolicy::cycle_counter() const {
1560   return _cycle_counter;
1561 }
1562 
1563 void ShenandoahCollectorPolicy::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
1564   _heuristics->record_phase_time(phase, secs);
1565 }
1566 
1567 ShenandoahHeap::GCCycleMode ShenandoahCollectorPolicy::should_start_traversal_gc() {
1568   return _heuristics->should_start_traversal_gc();
1569 }
1570 
1571 bool ShenandoahCollectorPolicy::can_do_traversal_gc() {
1572   return _heuristics->can_do_traversal_gc();
1573 }
1574 
1575 void ShenandoahCollectorPolicy::record_cycle_start() {
1576   _cycle_counter++;
1577   _heuristics->record_cycle_start();
1578 }
1579 
1580 void ShenandoahCollectorPolicy::record_cycle_end() {
1581   _heuristics->record_cycle_end();
1582 }
1583 
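// The shutdown flag is a one-way latch that lets callers observe that the VM
// is shutting down and no further cycles should be started.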
1584 void ShenandoahCollectorPolicy::record_shutdown() {
1585   _in_shutdown.set();
1586 }
1587 
1588 bool ShenandoahCollectorPolicy::is_at_shutdown() {
1589   return _in_shutdown.is_set();
1590 }
1591 
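// Human-readable summary of cycle outcomes, with a preamble explaining how
// Degenerated and Full GC cycles come about and how to avoid them.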
1592 void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const {
1593   out->print_cr("Under allocation pressure, concurrent cycles may cancel, and either continue cycle");
1594   out->print_cr("under stop-the-world pause or result in stop-the-world Full GC. Increase heap size,");
1595   out->print_cr("tune GC heuristics, set more aggressive pacing delay, or lower allocation rate");
1596   out->print_cr("to avoid Degenerated and Full GC cycles.");
1597   out->cr();
1598 
1599   out->print_cr(SIZE_FORMAT_W(5) " successful concurrent GCs",         _success_concurrent_gcs);
1600   out->print_cr("  " SIZE_FORMAT_W(5) " invoked explicitly",           _explicit_concurrent);
1601   out->cr();
1602 
1603   out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs",                   _success_degenerated_gcs);
1604   out->print_cr("  " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_degenerated);
1605   for (int c = 0; c < ShenandoahHeap::_DEGENERATED_LIMIT; c++) {
1606     if (_degen_points[c] > 0) {
1607       const char* desc = ShenandoahHeap::degen_point_to_string((ShenandoahHeap::ShenandoahDegenPoint)c);
1608       out->print_cr("    " SIZE_FORMAT_W(5) " happened at %s",         _degen_points[c], desc);
1609     }
1610   }
1611   out->print_cr("  " SIZE_FORMAT_W(5) " upgraded to Full GC",          _alloc_failure_degenerated_upgrade_to_full);
1612   out->cr();
1613 
1614   out->print_cr(SIZE_FORMAT_W(5) " Full GCs",                          _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full);
1615   out->print_cr("  " SIZE_FORMAT_W(5) " invoked explicitly",           _explicit_full);
1616   out->print_cr("  " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_full);
1617   out->print_cr("  " SIZE_FORMAT_W(5) " upgraded from Degenerated GC", _alloc_failure_degenerated_upgrade_to_full);
1618 }