/*
 * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "runtime/os.hpp"
#include "utilities/quickSort.hpp"

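// The SHENANDOAH_ERGO_* macros below adjust VM flags ergonomically: a flag is
// only overridden when the user has not set it explicitly on the command line
// (FLAG_IS_DEFAULT), and every override is logged so the effective
// configuration remains visible.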
#define SHENANDOAH_ERGO_DISABLE_FLAG(name)                                  \
  do {                                                                      \
    if (FLAG_IS_DEFAULT(name) && (name)) {                                  \
      log_info(gc)("Heuristics ergonomically sets -XX:-" #name);            \
      FLAG_SET_DEFAULT(name, false);                                        \
    }                                                                       \
  } while (0)

#define SHENANDOAH_ERGO_ENABLE_FLAG(name)                                   \
  do {                                                                      \
    if (FLAG_IS_DEFAULT(name) && !(name)) {                                 \
      log_info(gc)("Heuristics ergonomically sets -XX:+" #name);            \
      FLAG_SET_DEFAULT(name, true);                                         \
    }                                                                       \
  } while (0)

#define SHENANDOAH_ERGO_OVERRIDE_DEFAULT(name, value)                       \
  do {                                                                      \
    if (FLAG_IS_DEFAULT(name)) {                                            \
      log_info(gc)("Heuristics ergonomically sets -XX:" #name "=" #value);  \
      FLAG_SET_DEFAULT(name, value);                                        \
    }                                                                       \
  } while (0)

class ShenandoahHeuristics : public CHeapObj<mtGC> {
protected:
  bool _update_refs_early;
  bool _update_refs_adaptive;

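  // Per-region snapshot used when sorting candidate regions for the
  // collection set: the region itself, its garbage, and the sequence
  // number of the last allocation in it.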
  typedef struct {
    ShenandoahHeapRegion* _region;
    size_t _garbage;
    uint64_t _seqnum_last_alloc;
  } RegionData;

  static int compare_by_garbage(RegionData a, RegionData b) {
    if (a._garbage > b._garbage)
      return -1;
    else if (a._garbage < b._garbage)
      return 1;
    else return 0;
  }

  static int compare_by_alloc_seq_ascending(RegionData a, RegionData b) {
    if (a._seqnum_last_alloc == b._seqnum_last_alloc)
      return 0;
    else if (a._seqnum_last_alloc < b._seqnum_last_alloc)
      return -1;
    else return 1;
  }

  static int compare_by_alloc_seq_descending(RegionData a, RegionData b) {
    return -compare_by_alloc_seq_ascending(a, b);
  }

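  // Lazily-allocated, grow-only scratch array for region snapshots, sized to
  // the current number of regions and reused across GC cycles to avoid
  // repeated allocation.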
  RegionData* get_region_data_cache(size_t num) {
    RegionData* res = _region_data;
    if (res == NULL) {
      res = NEW_C_HEAP_ARRAY(RegionData, num, mtGC);
      _region_data = res;
      _region_data_size = num;
    } else if (_region_data_size < num) {
      res = REALLOC_C_HEAP_ARRAY(RegionData, _region_data, num, mtGC);
      _region_data = res;
      _region_data_size = num;
    }
    return res;
  }

  RegionData* _region_data;
  size_t _region_data_size;

  typedef struct {
    ShenandoahHeapRegion* _region;
    size_t _connections;
  } RegionConnections;

  RegionConnections* get_region_connects_cache(size_t num) {
    RegionConnections* res = _region_connects;
    if (res == NULL) {
      res = NEW_C_HEAP_ARRAY(RegionConnections, num, mtGC);
      _region_connects = res;
      _region_connects_size = num;
    } else if (_region_connects_size < num) {
      res = REALLOC_C_HEAP_ARRAY(RegionConnections, _region_connects, num, mtGC);
      _region_connects = res;
      _region_connects_size = num;
    }
    return res;
  }

  static int compare_by_connects(RegionConnections a, RegionConnections b) {
    if (a._connections == b._connections)
      return 0;
    else if (a._connections < b._connections)
      return -1;
    else return 1;
  }

  RegionConnections* _region_connects;
  size_t _region_connects_size;

  uint _degenerated_cycles_in_a_row;
  uint _successful_cycles_in_a_row;

  size_t _bytes_in_cset;

  double _last_cycle_end;

public:

  ShenandoahHeuristics();
  virtual ~ShenandoahHeuristics();

  void record_gc_start() {
    ShenandoahHeap::heap()->set_alloc_seq_gc_start();
  }

  void record_gc_end() {
    ShenandoahHeap::heap()->set_alloc_seq_gc_end();
  }

  virtual void record_cycle_start() {
    // Do nothing
  }

  virtual void record_cycle_end() {
    _last_cycle_end = os::elapsedTime();
  }

  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
    // Do nothing
  }

  virtual void print_thresholds() {
  }

  virtual bool should_start_normal_gc() const = 0;

  virtual bool should_start_update_refs() {
    return _update_refs_early;
  }

  virtual bool update_refs() const {
    return _update_refs_early;
  }

  virtual bool should_start_partial_gc() {
    return false;
  }

  virtual bool can_do_partial_gc() {
    return false;
  }

  virtual bool should_start_traversal_gc() {
    return false;
  }

  virtual bool can_do_traversal_gc() {
    return false;
  }

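  // Keep running degenerated cycles while the number of consecutive
  // degenerated cycles stays within ShenandoahFullGCThreshold; after that,
  // fail over to a Full GC.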
  virtual bool should_degenerate_cycle() {
    return _degenerated_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }

  virtual void record_success_concurrent() {
    _degenerated_cycles_in_a_row = 0;
    _successful_cycles_in_a_row++;
  }

  virtual void record_success_degenerated() {
    _degenerated_cycles_in_a_row++;
    _successful_cycles_in_a_row = 0;
  }

  virtual void record_success_full() {
    _degenerated_cycles_in_a_row = 0;
    _successful_cycles_in_a_row++;
  }

  virtual void record_allocation_failure_gc() {
    _bytes_in_cset = 0;
  }

  virtual void record_explicit_gc() {
    _bytes_in_cset = 0;
  }

  virtual void record_peak_occupancy() {
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);

  virtual bool should_process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Process references every Nth GC cycle.
    return cycle % ShenandoahRefProcFrequency == 0;
  }

  virtual bool should_unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Unload classes every Nth GC cycle.
    // This should not happen in the same cycle as process_references to amortize costs.
    // Offsetting by one is enough to break the rendezvous when periods are equal.
    // When periods are not equal, offsetting by one is just as good as any other guess.
    return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
  }

  bool maybe_add_heap_region(ShenandoahHeapRegion* hr,
                             ShenandoahCollectionSet* cs);

  virtual const char* name() = 0;
  virtual bool is_diagnostic() = 0;
  virtual bool is_experimental() = 0;
  virtual void initialize() {}

protected:
  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                     RegionData* data, size_t data_size,
                                                     size_t free) = 0;
};

ShenandoahHeuristics::ShenandoahHeuristics() :
  _update_refs_early(false),
  _update_refs_adaptive(false),
  _region_data(NULL),
  _region_data_size(0),
  _region_connects(NULL),
  _region_connects_size(0),
  _degenerated_cycles_in_a_row(0),
  _successful_cycles_in_a_row(0),
  _bytes_in_cset(0),
  _last_cycle_end(0)
{
  if (strcmp(ShenandoahUpdateRefsEarly, "on") == 0 ||
      strcmp(ShenandoahUpdateRefsEarly, "true") == 0) {
    _update_refs_early = true;
  } else if (strcmp(ShenandoahUpdateRefsEarly, "off") == 0 ||
             strcmp(ShenandoahUpdateRefsEarly, "false") == 0) {
    _update_refs_early = false;
  } else if (strcmp(ShenandoahUpdateRefsEarly, "adaptive") == 0) {
    _update_refs_adaptive = true;
    _update_refs_early = true;
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahUpdateRefsEarly option: %s", ShenandoahUpdateRefsEarly);
  }
}

ShenandoahHeuristics::~ShenandoahHeuristics() {
  if (_region_data != NULL) {
    FREE_C_HEAP_ARRAY(RegionData, _region_data);
  }
  if (_region_connects != NULL) {
    FREE_C_HEAP_ARRAY(RegionConnections, _region_connects);
  }
}

bool ShenandoahHeuristics::maybe_add_heap_region(ShenandoahHeapRegion* hr,
                                                 ShenandoahCollectionSet* collection_set) {
  if (hr->is_regular() && hr->has_live() && !collection_set->is_in(hr)) {
    collection_set->add_region(hr);
    return true;
  }
  return false;
}

void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
  assert(collection_set->count() == 0, "Must be empty");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.

  size_t num_regions = heap->num_regions();

  RegionData* candidates = get_region_data_cache(num_regions);

  size_t cand_idx = 0;

  size_t total_garbage = 0;

  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;

  size_t free = 0;
  size_t free_regions = 0;

  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);

    size_t garbage = region->garbage();
    total_garbage += garbage;

    if (region->is_empty()) {
      free_regions++;
      free += ShenandoahHeapRegion::region_size_bytes();
    } else if (region->is_regular()) {
      if (!region->has_live()) {
        // We can recycle it right away and put it in the free set.
        immediate_regions++;
        immediate_garbage += garbage;
        region->make_trash();
      } else {
        // This is our candidate for later consideration.
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._garbage = garbage;
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      // Reclaim humongous regions here, and count them as the immediate garbage
#ifdef ASSERT
      bool reg_live = region->has_live();
      bool bm_live = heap->is_marked_complete(oop(region->bottom() + BrooksPointer::word_size()));
      assert(reg_live == bm_live,
             "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT,
             BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words());
#endif
      if (!region->has_live()) {
        heap->trash_humongous_region_at(region);

        // Count only the start. Continuations would be counted on "trash" path
        immediate_regions++;
        immediate_garbage += garbage;
      }
    } else if (region->is_trash()) {
      // Count in just trashed collection set, during coalesced CM-with-UR
      immediate_regions++;
      immediate_garbage += garbage;
    }
  }

  // Step 2. Look back at garbage statistics, and decide if we want to collect anything,
  // given the amount of immediately reclaimable garbage. If we do, figure out the collection set.

  assert(immediate_garbage <= total_garbage,
         "Cannot have more immediate garbage than total garbage: " SIZE_FORMAT "M vs " SIZE_FORMAT "M",
         immediate_garbage / M, total_garbage / M);

  size_t immediate_percent = total_garbage == 0 ? 0 : (immediate_garbage * 100 / total_garbage);

  if (immediate_percent <= ShenandoahImmediateThreshold) {
    choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage + free);
    collection_set->update_region_status();

    size_t cset_percent = total_garbage == 0 ? 0 : (collection_set->garbage() * 100 / total_garbage);
    log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                       collection_set->garbage() / M, cset_percent, collection_set->live_data() / M, collection_set->count());
  }

  log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions",
                     immediate_garbage / M, immediate_percent, immediate_regions);
}

void ShenandoahCollectorPolicy::record_gc_start() {
  _heuristics->record_gc_start();
}

void ShenandoahCollectorPolicy::record_gc_end() {
  _heuristics->record_gc_end();
}

class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahPassiveHeuristics() : ShenandoahHeuristics() {
    // Do not allow concurrent cycles.
    FLAG_SET_DEFAULT(ExplicitGCInvokesConcurrent, false);

    // Passive runs with max speed, reacts on allocation failure.
    FLAG_SET_DEFAULT(ShenandoahPacing, false);

    // Disable known barriers by default.
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahSATBBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahConditionalSATBBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahKeepAliveBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahWriteBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahReadBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValEnqueueBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValWriteBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahStoreValReadBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCASBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahAcmpBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahCloneBarrier);
    SHENANDOAH_ERGO_DISABLE_FLAG(UseShenandoahMatrix);
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t free) {
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > 0) {
        cset->add_region(r);
      }
    }
  }

  virtual bool should_start_normal_gc() const {
    // Never do concurrent GCs.
    return false;
  }

  virtual bool should_process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    // Always process references.
    return true;
  }

  virtual bool should_unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    // Always unload classes.
    return true;
  }

  virtual bool should_degenerate_cycle() {
    // Always fail to Full GC
    return false;
  }

  virtual const char* name() {
    return "passive";
  }

  virtual bool is_diagnostic() {
    return true;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahAggressiveHeuristics() : ShenandoahHeuristics() {
    // Do not shortcut evacuation
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold, 100);

    // Aggressive runs with max speed for allocation, to capture races against mutator
    SHENANDOAH_ERGO_DISABLE_FLAG(ShenandoahPacing);
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t free) {
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > 0) {
        cset->add_region(r);
      }
    }
  }

  virtual bool should_start_normal_gc() const {
    return true;
  }

  virtual bool should_process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    // Randomly process refs with 50% chance.
    return (os::random() & 1) == 1;
  }

  virtual bool should_unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    // Randomly unload classes with 50% chance.
    return (os::random() & 1) == 1;
  }

  virtual const char* name() {
    return "aggressive";
  }

  virtual bool is_diagnostic() {
    return true;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahStaticHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahStaticHeuristics() : ShenandoahHeuristics() {
    // Static heuristics may degrade to continuous if live data is larger
    // than free threshold. ShenandoahAllocationThreshold is supposed to break this,
    // but it only works if it is non-zero.
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahAllocationThreshold, 1);
  }

  void print_thresholds() {
    log_info(gc, init)("Shenandoah heuristics thresholds: allocation " SIZE_FORMAT ", free " SIZE_FORMAT ", garbage " SIZE_FORMAT,
                       ShenandoahAllocationThreshold,
                       ShenandoahFreeThreshold,
                       ShenandoahGarbageThreshold);
  }

  virtual ~ShenandoahStaticHeuristics() {}

  virtual bool should_start_normal_gc() const {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    size_t capacity = heap->capacity();
    size_t available = heap->free_set()->available();
    size_t threshold_available = (capacity * ShenandoahFreeThreshold) / 100;
    size_t threshold_bytes_allocated = capacity * ShenandoahAllocationThreshold / 100;
    size_t bytes_allocated = heap->bytes_allocated_since_gc_start();

    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);

    if (available < threshold_available &&
        bytes_allocated > threshold_bytes_allocated) {
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      log_info(gc, ergo)("Concurrent marking triggered. Free: " SIZE_FORMAT "M, Free Threshold: " SIZE_FORMAT
                         "M; Allocated: " SIZE_FORMAT "M, Alloc Threshold: " SIZE_FORMAT "M",
                         available / M, threshold_available / M, bytes_allocated / M, threshold_bytes_allocated / M);
      return true;
    } else if (periodic_gc) {
      log_info(gc, ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                         last_time_ms, ShenandoahGuaranteedGCInterval);
      return true;
    }

    return false;
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t free) {
    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;

    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > threshold) {
        cset->add_region(r);
      }
    }
  }

  virtual const char* name() {
    return "static";
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahCompactHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahCompactHeuristics() : ShenandoahHeuristics() {
    SHENANDOAH_ERGO_ENABLE_FLAG(ShenandoahUncommit);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahAllocationThreshold,  10);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahImmediateThreshold,   100);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahUncommitDelay,        5000);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGuaranteedGCInterval, 30000);
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahGarbageThreshold,     20);
  }

  virtual bool should_start_normal_gc() const {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    size_t available = heap->free_set()->available();
    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
    size_t bytes_allocated = heap->bytes_allocated_since_gc_start();
    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;

    if (available < threshold_bytes_allocated || bytes_allocated > threshold_bytes_allocated) {
      log_info(gc, ergo)("Concurrent marking triggered. Free: " SIZE_FORMAT "M, Allocated: " SIZE_FORMAT "M, Alloc Threshold: " SIZE_FORMAT "M",
                         available / M, bytes_allocated / M, threshold_bytes_allocated / M);
      return true;
    } else if (periodic_gc) {
      log_info(gc, ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                         last_time_ms, ShenandoahGuaranteedGCInterval);
      return true;
    }

    return false;
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t actual_free) {
    // Do not select a CSet so large that it would overflow the available free space.
    size_t max_cset = actual_free * 3 / 4;

    log_info(gc, ergo)("CSet Selection. Actual Free: " SIZE_FORMAT "M, Max CSet: " SIZE_FORMAT "M",
                       actual_free / M, max_cset / M);

    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;

    size_t live_cset = 0;
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      size_t new_cset = live_cset + r->get_live_data_bytes();
      if (new_cset < max_cset && r->garbage() > threshold) {
        live_cset = new_cset;
        cset->add_region(r);
      }
    }
  }

  virtual const char* name() {
    return "compact";
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
private:
  uintx _free_threshold;
  size_t _peak_occupancy;
  TruncatedSeq* _cycle_gap_history;
  TruncatedSeq* _conc_mark_duration_history;
  TruncatedSeq* _conc_uprefs_duration_history;
public:
  ShenandoahAdaptiveHeuristics() :
    ShenandoahHeuristics(),
    _free_threshold(ShenandoahInitFreeThreshold),
    _peak_occupancy(0),
    _cycle_gap_history(new TruncatedSeq(5)),
    _conc_mark_duration_history(new TruncatedSeq(5)),
    _conc_uprefs_duration_history(new TruncatedSeq(5)) {
  }

  virtual ~ShenandoahAdaptiveHeuristics() {}

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t actual_free) {
    size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;

    // The logic for cset selection in adaptive is as follows:
    //
    //   1. We cannot get a cset larger than the available free space. Otherwise we guarantee OOME
    //      during evacuation, and thus guarantee full GC. In practice, we also want to let the
    //      application allocate something. This is why we limit the CSet to some fraction of the
    //      available space. In a non-overloaded heap, max_cset would contain all plausible candidates
    //      over the garbage threshold.
    //
    //   2. We should not get the cset too low, so that the free threshold would still be met right
    //      after the cycle. Otherwise we get back-to-back cycles for no reason if the heap is
    //      too fragmented. In a non-overloaded, non-fragmented heap min_cset would be around zero.
    //
    // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates
    // before we meet min_cset. Then we add all candidates that fit the garbage threshold before
    // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme,
    // ShenandoahGarbageThreshold is a soft threshold which is ignored until min_cset is hit.
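    //
    // A worked example with hypothetical numbers: with 1024M capacity,
    // _free_threshold = 10 and MaxNormalStep = 5, free_target is 15% of
    // capacity, about 153M. If actual_free is 100M, then min_cset is
    // 153M - 100M = 53M and max_cset is 100M * 3/4 = 75M: the first ~53M of
    // live data is taken unconditionally, and further regions are taken only
    // if they clear the garbage threshold, up to 75M of live data in total.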

    size_t free_target = MIN2<size_t>(_free_threshold + MaxNormalStep, 100) * ShenandoahHeap::heap()->capacity() / 100;
    size_t min_cset = free_target > actual_free ? (free_target - actual_free) : 0;
    size_t max_cset = actual_free * 3 / 4;
    min_cset = MIN2(min_cset, max_cset);

    log_info(gc, ergo)("Adaptive CSet Selection. Target Free: " SIZE_FORMAT "M, Actual Free: "
                       SIZE_FORMAT "M, Target CSet: [" SIZE_FORMAT "M, " SIZE_FORMAT "M]",
                       free_target / M, actual_free / M, min_cset / M, max_cset / M);

    // Better select garbage-first regions
    QuickSort::sort<RegionData>(data, (int)size, compare_by_garbage, false);

    size_t live_cset = 0;
    _bytes_in_cset = 0;
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;

      size_t new_cset = live_cset + r->get_live_data_bytes();

      if (new_cset < min_cset) {
        cset->add_region(r);
        _bytes_in_cset += r->used();
        live_cset = new_cset;
      } else if (new_cset <= max_cset) {
        if (r->garbage() > garbage_threshold) {
          cset->add_region(r);
          _bytes_in_cset += r->used();
          live_cset = new_cset;
        }
      } else {
        break;
      }
    }
  }

  static const intx MaxNormalStep = 5;      // max step towards goal under normal conditions
  static const intx DegeneratedGC_Hit = 10; // how much to step on degenerated GC
  static const intx AllocFailure_Hit = 20;  // how much to step on allocation failure full GC
  static const intx UserRequested_Hit = 0;  // how much to step on user requested full GC

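  // Feedback loop: compare the lowest free space observed over the completed
  // cycle (capacity minus peak occupancy, in percent) against
  // ShenandoahMinFreeThreshold, and nudge the free threshold towards it by at
  // most MaxNormalStep. Raising the threshold starts GC earlier (pessimize);
  // lowering it starts GC later (optimize), which we only do after enough
  // successful cycles in a row.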
  void handle_cycle_success() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t capacity = heap->capacity();

    size_t current_threshold = (capacity - _peak_occupancy) * 100 / capacity;
    size_t min_threshold = ShenandoahMinFreeThreshold;
    intx step = (intx)min_threshold - (intx)current_threshold;
    step = MAX2(step, (intx) -MaxNormalStep);
    step = MIN2(step, (intx) MaxNormalStep);

    log_info(gc, ergo)("Capacity: " SIZE_FORMAT "M, Peak Occupancy: " SIZE_FORMAT
                       "M, Lowest Free: " SIZE_FORMAT "M, Free Threshold: " UINTX_FORMAT "M",
                       capacity / M, _peak_occupancy / M,
                       (capacity - _peak_occupancy) / M, ShenandoahMinFreeThreshold * capacity / 100 / M);

    if (step > 0) {
      // Pessimize
      adjust_free_threshold(step);
    } else if (step < 0) {
      // Optimize, if enough happy cycles happened
      if (_successful_cycles_in_a_row > ShenandoahHappyCyclesThreshold &&
          _free_threshold > 0) {
        adjust_free_threshold(step);
        _successful_cycles_in_a_row = 0;
      }
    } else {
      // do nothing
    }
    _peak_occupancy = 0;
  }

  void record_cycle_start() {
    ShenandoahHeuristics::record_cycle_start();
    double last_cycle_gap = (os::elapsedTime() - _last_cycle_end);
    _cycle_gap_history->add(last_cycle_gap);
  }

  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
    if (phase == ShenandoahPhaseTimings::conc_mark) {
      _conc_mark_duration_history->add(secs);
    } else if (phase == ShenandoahPhaseTimings::conc_update_refs) {
      _conc_uprefs_duration_history->add(secs);
    } // Else ignore
  }

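  // Apply a (possibly negative) adjustment to the free threshold, clamping the
  // result to [ShenandoahMinFreeThreshold, ShenandoahMaxFreeThreshold]. For
  // example, a degenerated GC bumps the threshold up by DegeneratedGC_Hit = 10
  // percentage points.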
  void adjust_free_threshold(intx adj) {
    intx new_value = adj + _free_threshold;
    uintx new_threshold = (uintx)MAX2<intx>(new_value, 0);
    new_threshold = MAX2(new_threshold, ShenandoahMinFreeThreshold);
    new_threshold = MIN2(new_threshold, ShenandoahMaxFreeThreshold);
    if (new_threshold != _free_threshold) {
      _free_threshold = new_threshold;
      log_info(gc, ergo)("Adjusting free threshold to: " UINTX_FORMAT "%% (" SIZE_FORMAT "M)",
                         _free_threshold, _free_threshold * ShenandoahHeap::heap()->capacity() / 100 / M);
    }
  }

  virtual void record_success_concurrent() {
    ShenandoahHeuristics::record_success_concurrent();
    handle_cycle_success();
  }

  virtual void record_success_degenerated() {
    ShenandoahHeuristics::record_success_degenerated();
    adjust_free_threshold(DegeneratedGC_Hit);
  }

  virtual void record_success_full() {
    ShenandoahHeuristics::record_success_full();
    adjust_free_threshold(AllocFailure_Hit);
  }

  virtual void record_explicit_gc() {
    ShenandoahHeuristics::record_explicit_gc();
    adjust_free_threshold(UserRequested_Hit);
  }

  virtual void record_peak_occupancy() {
    _peak_occupancy = MAX2(_peak_occupancy, ShenandoahHeap::heap()->used());
  }

  virtual bool should_start_normal_gc() const {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t capacity = heap->capacity();
    size_t available = heap->free_set()->available();

    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
    size_t threshold_available = (capacity * _free_threshold) / 100;
    size_t bytes_allocated = heap->bytes_allocated_since_gc_start();
    size_t threshold_bytes_allocated = capacity * ShenandoahAllocationThreshold / 100;

    if (available < threshold_available &&
        bytes_allocated > threshold_bytes_allocated) {
      log_info(gc, ergo)("Concurrent marking triggered. Free: " SIZE_FORMAT "M, Free Threshold: " SIZE_FORMAT
                         "M; Allocated: " SIZE_FORMAT "M, Alloc Threshold: " SIZE_FORMAT "M",
                         available / M, threshold_available / M, bytes_allocated / M, threshold_bytes_allocated / M);
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      return true;
    } else if (periodic_gc) {
      log_info(gc, ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                         last_time_ms, ShenandoahGuaranteedGCInterval);
      return true;
    }

    return false;
  }

  virtual bool should_start_update_refs() {
    if (!_update_refs_adaptive) {
      return _update_refs_early;
    }

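    // Adaptive mode decides whether update-refs runs as its own phase right
    // after evacuation, or is merged into the next concurrent mark. Using two
    // different thresholds (min gap to merge, max gap to split again) gives
    // hysteresis, so the decision does not flap between cycles.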
    double cycle_gap_avg = _cycle_gap_history->avg();
    double conc_mark_avg = _conc_mark_duration_history->avg();
    double conc_uprefs_avg = _conc_uprefs_duration_history->avg();

    if (_update_refs_early) {
      double threshold = ShenandoahMergeUpdateRefsMinGap / 100.0;
      if (conc_mark_avg + conc_uprefs_avg > cycle_gap_avg * threshold) {
        _update_refs_early = false;
      }
    } else {
      double threshold = ShenandoahMergeUpdateRefsMaxGap / 100.0;
      if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) {
        _update_refs_early = true;
      }
    }
    return _update_refs_early;
  }

  virtual const char* name() {
    return "adaptive";
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahPartialHeuristics : public ShenandoahAdaptiveHeuristics {
protected:
  size_t* _from_idxs;

public:
  ShenandoahPartialHeuristics() : ShenandoahAdaptiveHeuristics() {
    FLAG_SET_DEFAULT(UseShenandoahMatrix, true);

    // Set up special barriers for concurrent partial GC.
    FLAG_SET_DEFAULT(ShenandoahConditionalSATBBarrier, true);
    FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahStoreValWriteBarrier, true);
    FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false);

    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahRefProcFrequency, 1);
    // TODO: Disable this optimization for now, as it also requires the matrix barriers.
#ifdef COMPILER2
    FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
#endif
  }

  void initialize() {
    _from_idxs = NEW_C_HEAP_ARRAY(size_t, ShenandoahHeap::heap()->num_regions(), mtGC);
  }

  virtual ~ShenandoahPartialHeuristics() {
    FREE_C_HEAP_ARRAY(size_t, _from_idxs);
  }

  bool should_start_update_refs() {
    return true;
  }

  bool update_refs() const {
    return true;
  }

  bool can_do_partial_gc() {
    return true;
  }

  bool should_start_normal_gc() const {
    return false;
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return true;
  }

  virtual bool should_start_partial_gc() = 0;
  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) = 0;
};

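// Connectedness heuristics: consult the connection matrix and prefer regions
// with few incoming references. Such regions are cheap to evacuate during a
// partial cycle, because only the few regions recorded as pointing into them
// have to be scanned as roots.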
class ShenandoahPartialConnectedHeuristics : public ShenandoahPartialHeuristics {
public:
  virtual const char* name() {
    return "connectedness";
  }

  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->has_forwarded_objects()) {
      // Cannot start partial if heap is not completely updated.
      return false;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return false;
    }

    size_t threshold = capacity * ShenandoahConnectednessPercentage / 100;
    size_t allocated = used - prev_used;
    bool result = allocated > threshold;

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
                       result ? "Partial cycle triggered" : "Partial cycle skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M);

    if (result) {
      log_info(gc, ergo)("%s", msg.buffer());
    } else {
      log_trace(gc, ergo)("%s", msg.buffer());
    }
    return result;
  }

  void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    size_t num_regions = heap->num_regions();

    RegionConnections* connects = get_region_connects_cache(num_regions);
    size_t connect_cnt = 0;

    for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
      ShenandoahHeapRegion* region = heap->get_region(to_idx);
      region->set_root(false);
      if (!region->is_regular()) continue;

      uint count = matrix->count_connected_to(to_idx, num_regions);
      if (count < ShenandoahPartialInboundThreshold) {
        connects[connect_cnt]._region = region;
        connects[connect_cnt]._connections = count;
        connect_cnt++;
      }
    }

    QuickSort::sort<RegionConnections>(connects, (int)connect_cnt, compare_by_connects, false);

    // Heuristics triggered partial when allocated was larger than a threshold.
    // New allocations might have happened while we were preparing for GC,
    // capture all of them in this cycle. This "adjusts" the threshold automatically.
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN3(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions, connect_cnt);

    for (size_t c = 0; c < target; c++) {
      assert(c == 0 || connects[c]._connections >= connects[c-1]._connections, "monotonicity");

      ShenandoahHeapRegion* region = connects[c]._region;
      size_t to_idx = region->region_number();
      assert(region->is_regular(), "filtered before");
      assert(!heap->region_in_collection_set(to_idx), "must not be in cset yet");

      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(to_idx, num_regions,
                                         _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        maybe_add_heap_region(region, collection_set);
        for (size_t i = 0; i < from_idx_count; i++) {
          ShenandoahHeapRegion* r = heap->get_region(_from_idxs[i]);
          if (!r->is_root()) {
            r->set_root(true);
          }
        }
      }
    }

    collection_set->update_region_status();
  }
};

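// Generational partial heuristics: treat the most recently allocated regions
// (by allocation sequence number) as a young generation and collect those
// first, on the usual generational assumption that most objects die young.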
class ShenandoahGenerationalPartialHeuristics : public ShenandoahPartialHeuristics {
public:

  ShenandoahGenerationalPartialHeuristics() : ShenandoahPartialHeuristics() {
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahPartialInboundThreshold, 100);
  }

  virtual const char* name() {
    return "generational";
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    uint64_t alloc_seq_at_last_gc_end   = heap->alloc_seq_at_last_gc_end();
    uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start();

    size_t num_regions = heap->num_regions();

    RegionData* candidates = get_region_data_cache(num_regions);

    for (size_t i = 0; i < num_regions; i++) {
      candidates[i]._region = heap->get_region(i);
      candidates[i]._seqnum_last_alloc = heap->get_region(i)->seqnum_last_alloc();
    }

    QuickSort::sort<RegionData>(candidates, (int)num_regions, compare_by_alloc_seq_descending, false);

    // Heuristics triggered partial when allocated was larger than a threshold.
    // New allocations might have happened while we were preparing for GC,
    // capture all of them in this cycle. This "adjusts" the threshold automatically.
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions);

    for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
      ShenandoahHeapRegion* region = heap->get_region(to_idx);
      region->set_root(false);
    }

    uint count = 0;

    for (uint i = 0; (i < num_regions) && (count < target); i++) {
      ShenandoahHeapRegion* contender = candidates[i]._region;
      if (contender->seqnum_last_alloc() <= alloc_seq_at_last_gc_end) {
        break;
      }

      size_t index = contender->region_number();
      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(index, num_regions, _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        if (maybe_add_heap_region(contender, collection_set)) {
          count++;
        }

        for (uint f = 0; f < from_idx_count; f++) {
          ShenandoahHeapRegion* r = heap->get_region(_from_idxs[f]);
          if (!r->is_root()) {
            r->set_root(true);
          }
        }
      }
    }
    collection_set->update_region_status();

    log_info(gc, ergo)("Regions: Max: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
                       num_regions, target, ShenandoahGenerationalYoungGenPercentage, collection_set->count());
  }

  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->has_forwarded_objects()) {
      // Cannot start partial if heap is not completely updated.
      return false;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return false;
    }

    size_t threshold = capacity * ShenandoahGenerationalYoungGenPercentage / 100;
    size_t allocated = used - prev_used;

    // Start the next young gc after we've allocated percentage_young of the heap.
    bool result = allocated > threshold;

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
                       result ? "Partial cycle triggered" : "Partial cycle skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M);

    if (result) {
      log_info(gc, ergo)("%s", msg.buffer());
    } else {
      log_trace(gc, ergo)("%s", msg.buffer());
    }
    return result;
  }
};

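// LRU partial heuristics: prefer regions that have not seen allocations for
// the longest time (lowest allocation sequence numbers), on the assumption
// that these cold regions have had the most time to accumulate garbage.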
class ShenandoahLRUPartialHeuristics : public ShenandoahPartialHeuristics {
public:
  ShenandoahLRUPartialHeuristics() : ShenandoahPartialHeuristics() {
    SHENANDOAH_ERGO_OVERRIDE_DEFAULT(ShenandoahPartialInboundThreshold, 100);
  }

  virtual const char* name() {
    return "LRU";
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start();

    size_t num_regions = heap->num_regions();

    RegionData* candidates = get_region_data_cache(num_regions);
    int candidate_idx = 0;
    for (size_t i = 0; i < num_regions; i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      if (r->is_regular() && (r->seqnum_last_alloc() > 0)) {
        candidates[candidate_idx]._region = r;
        candidates[candidate_idx]._seqnum_last_alloc = r->seqnum_last_alloc();
        candidate_idx++;
      }
    }

    size_t sorted_count = candidate_idx;
    QuickSort::sort<RegionData>(candidates, (int)sorted_count, compare_by_alloc_seq_ascending, false);

    // Heuristics triggered partial when allocated was larger than a threshold.
    // New allocations might have happened while we were preparing for GC,
    // capture all of them in this cycle. This "adjusts" the threshold automatically.
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), sorted_count);

    for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
      ShenandoahHeapRegion* region = heap->get_region(to_idx);
      region->set_root(false);
    }
    uint count = 0;

    for (uint i = 0; (i < sorted_count) && (count < target); i++) {
      ShenandoahHeapRegion* contender = candidates[i]._region;
      if (contender->seqnum_last_alloc() >= alloc_seq_at_last_gc_start) {
        break;
      }

      size_t index = contender->region_number();
      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(index, num_regions, _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        if (maybe_add_heap_region(contender, collection_set)) {
          count++;
        }
        for (uint f = 0; f < from_idx_count; f++) {
          ShenandoahHeapRegion* r = heap->get_region(_from_idxs[f]);
          if (!r->is_root()) {
            r->set_root(true);
          }
        }
      }
    }
    collection_set->update_region_status();

    log_info(gc, ergo)("Regions: Max: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
                       num_regions, target, ShenandoahLRUOldGenPercentage, collection_set->count());
  }

  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->has_forwarded_objects()) {
      // Cannot start partial if heap is not completely updated.
      return false;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return false;
    }

    // For now, do not start until we are 40% full.
    size_t allocated = used - prev_used;
    size_t threshold = capacity * ShenandoahLRUOldGenPercentage / 100;
    size_t minimum   = capacity * 40 / 100;

    bool result = ((used > minimum) && (allocated > threshold));

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M, Minimum: " SIZE_FORMAT "M",
                       result ? "Partial cycle triggered" : "Partial cycle skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M, minimum/M);

    if (result) {
      log_info(gc, ergo)("%s", msg.buffer());
    } else {
      log_trace(gc, ergo)("%s", msg.buffer());
    }
    return result;
  }
};

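// Traversal heuristics: drive the experimental traversal GC, which is
// intended to mark, evacuate and update references in a single concurrent
// pass, hence it runs with the SATB machinery disabled and never schedules a
// separate update-refs phase.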
class ShenandoahTraversalHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahTraversalHeuristics() : ShenandoahHeuristics() {
    FLAG_SET_DEFAULT(UseShenandoahMatrix,              false);
    FLAG_SET_DEFAULT(ShenandoahSATBBarrier,            false);
    FLAG_SET_DEFAULT(ShenandoahConditionalSATBBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier,    false);
    FLAG_SET_DEFAULT(ShenandoahStoreValWriteBarrier,   false);
    FLAG_SET_DEFAULT(ShenandoahStoreValEnqueueBarrier, true);
    FLAG_SET_DEFAULT(ShenandoahKeepAliveBarrier,       false);
    FLAG_SET_DEFAULT(ShenandoahBarriersForConst,       true);
    FLAG_SET_DEFAULT(ShenandoahWriteBarrierRB,         false);
    FLAG_SET_DEFAULT(ShenandoahAllocImplicitLive,      false);
    FLAG_SET_DEFAULT(ShenandoahAllowMixedAllocs,       false);
  }

  virtual bool should_start_normal_gc() const {
    return false;
  }

  virtual bool is_experimental() {
    return true;
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool can_do_traversal_gc() {
    return true;
  }

  virtual const char* name() {
    return "traversal";
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    for (size_t i = 0; i < heap->num_regions(); i++) {
      ShenandoahHeapRegion* r = heap->get_region(i);
      assert(!r->is_root(), "must not be root region");
      assert(!collection_set->is_in(r), "must not yet be in cset");
      if (r->is_regular() && r->used() > 0) {
        size_t garbage_percent = r->garbage() * 100 / ShenandoahHeapRegion::region_size_bytes();
        if (garbage_percent > ShenandoahGarbageThreshold) {
          collection_set->add_region(r);
        }
      }
      heap->set_next_top_at_mark_start(r->bottom(), r->top());
      heap->set_complete_top_at_mark_start(r->bottom(), r->top()); // For debugging purposes
      r->clear_live_data();
    }
    collection_set->update_region_status();
  }

  virtual bool should_start_traversal_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->has_forwarded_objects()) return false;

    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
    if (periodic_gc) {
      log_info(gc, ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                         last_time_ms, ShenandoahGuaranteedGCInterval);
      return true;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    return 100 - (used * 100 / capacity) < ShenandoahFreeThreshold;
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                     RegionData* data, size_t data_size,
                                                     size_t free) {
    ShouldNotReachHere();
  }
};

1344 ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
1345   _cycle_counter(0),
1346   _success_concurrent_gcs(0),
1347   _success_partial_gcs(0),
1348   _success_degenerated_gcs(0),
1349   _success_full_gcs(0),
1350   _explicit_concurrent(0),
1351   _explicit_full(0),
1352   _alloc_failure_degenerated(0),
1353   _alloc_failure_full(0),
1354   _alloc_failure_degenerated_upgrade_to_full(0)
1355 {
1356   Copy::zero_to_bytes(_degen_points, sizeof(size_t) * ShenandoahHeap::_DEGENERATED_LIMIT);
1357 
1358   ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());
1359 
1360   initialize_all();
1361 
1362   _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
1363 
  if (ShenandoahGCHeuristics != NULL) {
    _minor_heuristics = NULL;
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      _heuristics = new ShenandoahAggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
      _heuristics = new ShenandoahStaticHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      _heuristics = new ShenandoahPassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
      _heuristics = new ShenandoahCompactHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "connected") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahPartialConnectedHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "generational") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahGenerationalPartialHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "LRU") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahLRUPartialHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
      _heuristics = new ShenandoahTraversalHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }

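    // Diagnostic and experimental heuristics must be unlocked explicitly,
    // for both the major and the minor heuristics.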
    if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _heuristics->name()));
    }
    if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _heuristics->name()));
    }
    if (_minor_heuristics != NULL && _minor_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _minor_heuristics->name()));
    }
    if (_minor_heuristics != NULL && _minor_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _minor_heuristics->name()));
    }

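    // These flags select alternative barrier mechanisms; reject contradictory
    // combinations during initialization.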
    if (ShenandoahConditionalSATBBarrier && ShenandoahSATBBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahSATBBarrier and ShenandoahConditionalSATBBarrier");
    }
    if (ShenandoahStoreValWriteBarrier && ShenandoahStoreValReadBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValWriteBarrier and ShenandoahStoreValReadBarrier");
    }
    if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
    }
    if (ShenandoahStoreValWriteBarrier && ShenandoahStoreValEnqueueBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValWriteBarrier and ShenandoahStoreValEnqueueBarrier");
    }
    if (_minor_heuristics != NULL) {
      log_info(gc, init)("Shenandoah heuristics: %s minor with %s major",
                         _minor_heuristics->name(), _heuristics->name());
    } else {
      log_info(gc, init)("Shenandoah heuristics: %s",
                         _heuristics->name());
    }
    _heuristics->print_thresholds();
  } else {
    ShouldNotReachHere();
  }
}

ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
  return this;
}

BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
  return BarrierSet::Shenandoah;
}

HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
                                                       bool is_tlab,
                                                       bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

void ShenandoahCollectorPolicy::initialize_alignments() {
  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
  _space_alignment = ShenandoahHeapRegion::region_size_bytes();
  _heap_alignment = ShenandoahHeapRegion::region_size_bytes();
}

void ShenandoahCollectorPolicy::post_heap_initialize() {
  _heuristics->initialize();
  if (_minor_heuristics != NULL) {
    _minor_heuristics->initialize();
  }
}

void ShenandoahCollectorPolicy::record_explicit_to_concurrent() {
  _heuristics->record_explicit_gc();
  _explicit_concurrent++;
}

void ShenandoahCollectorPolicy::record_explicit_to_full() {
  _heuristics->record_explicit_gc();
  _explicit_full++;
}

void ShenandoahCollectorPolicy::record_alloc_failure_to_full() {
  _heuristics->record_allocation_failure_gc();
  _alloc_failure_full++;
}

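// Degenerated cycles are binned by the point at which the concurrent cycle was
// abandoned; print_gc_stats() reports the resulting histogram.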
void ShenandoahCollectorPolicy::record_alloc_failure_to_degenerated(ShenandoahHeap::ShenandoahDegenPoint point) {
  assert(point < ShenandoahHeap::_DEGENERATED_LIMIT, "sanity");
  _heuristics->record_allocation_failure_gc();
  _alloc_failure_degenerated++;
  _degen_points[point]++;
}

void ShenandoahCollectorPolicy::record_degenerated_upgrade_to_full() {
  _alloc_failure_degenerated_upgrade_to_full++;
}

void ShenandoahCollectorPolicy::record_success_concurrent() {
  _heuristics->record_success_concurrent();
  _success_concurrent_gcs++;
}

void ShenandoahCollectorPolicy::record_success_partial() {
  _success_partial_gcs++;
}

void ShenandoahCollectorPolicy::record_success_degenerated() {
  _heuristics->record_success_degenerated();
  _success_degenerated_gcs++;
}

void ShenandoahCollectorPolicy::record_success_full() {
  _heuristics->record_success_full();
  _success_full_gcs++;
}

bool ShenandoahCollectorPolicy::should_start_normal_gc() {
  return _heuristics->should_start_normal_gc();
}

bool ShenandoahCollectorPolicy::should_degenerate_cycle() {
  return _heuristics->should_degenerate_cycle();
}

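// For update-refs decisions, the minor heuristics (when installed) gets the
// first say; otherwise the major heuristics decides.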
bool ShenandoahCollectorPolicy::update_refs() {
  if (_minor_heuristics != NULL && _minor_heuristics->update_refs()) {
    return true;
  }
  return _heuristics->update_refs();
}

bool ShenandoahCollectorPolicy::should_start_update_refs() {
  if (_minor_heuristics != NULL && _minor_heuristics->should_start_update_refs()) {
    return true;
  }
  return _heuristics->should_start_update_refs();
}

void ShenandoahCollectorPolicy::record_peak_occupancy() {
  _heuristics->record_peak_occupancy();
}

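// Minor (partial) collections build their collection set with the minor
// heuristics; all other cycles use the major heuristics.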
void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set,
                                                      bool minor) {
  if (minor) {
    assert(_minor_heuristics != NULL, "Minor heuristics must be set up for minor collections");
    _minor_heuristics->choose_collection_set(collection_set);
  } else {
    _heuristics->choose_collection_set(collection_set);
  }
}

bool ShenandoahCollectorPolicy::should_process_references() {
  return _heuristics->should_process_references();
}

bool ShenandoahCollectorPolicy::should_unload_classes() {
  return _heuristics->should_unload_classes();
}

size_t ShenandoahCollectorPolicy::cycle_counter() const {
  return _cycle_counter;
}

void ShenandoahCollectorPolicy::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
  _heuristics->record_phase_time(phase, secs);
}

bool ShenandoahCollectorPolicy::should_start_partial_gc() {
  if (_minor_heuristics != NULL) {
    return _minor_heuristics->should_start_partial_gc();
  } else {
    return false; // no minor heuristics -> no partial gc
  }
}

bool ShenandoahCollectorPolicy::can_do_partial_gc() {
  if (_minor_heuristics != NULL) {
    return _minor_heuristics->can_do_partial_gc();
  } else {
    return false; // no minor heuristics -> no partial gc
  }
}

bool ShenandoahCollectorPolicy::should_start_traversal_gc() {
  return _heuristics->should_start_traversal_gc();
}

bool ShenandoahCollectorPolicy::can_do_traversal_gc() {
  return _heuristics->can_do_traversal_gc();
}

void ShenandoahCollectorPolicy::record_cycle_start() {
  _cycle_counter++;
  _heuristics->record_cycle_start();
}

void ShenandoahCollectorPolicy::record_cycle_end() {
  _heuristics->record_cycle_end();
}

void ShenandoahCollectorPolicy::record_shutdown() {
  _in_shutdown.set();
}

bool ShenandoahCollectorPolicy::is_at_shutdown() {
  return _in_shutdown.is_set();
}

void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const {
  out->print_cr("Under allocation pressure, a concurrent cycle may cancel and either continue");
  out->print_cr("as a stop-the-world (Degenerated) cycle, or be upgraded to a stop-the-world Full GC.");
  out->print_cr("Increase heap size, tune GC heuristics, set a more aggressive pacing delay, or");
  out->print_cr("lower the allocation rate to avoid Degenerated and Full GC cycles.");
  out->cr();

  out->print_cr(SIZE_FORMAT_W(5) " successful partial concurrent GCs", _success_partial_gcs);
  out->cr();

  out->print_cr(SIZE_FORMAT_W(5) " successful concurrent GCs",         _success_concurrent_gcs);
  out->print_cr("  " SIZE_FORMAT_W(5) " invoked explicitly",           _explicit_concurrent);
  out->cr();

  out->print_cr(SIZE_FORMAT_W(5) " Degenerated GCs",                   _success_degenerated_gcs);
  out->print_cr("  " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_degenerated);
  for (int c = 0; c < ShenandoahHeap::_DEGENERATED_LIMIT; c++) {
    if (_degen_points[c] > 0) {
      const char* desc = ShenandoahHeap::degen_point_to_string((ShenandoahHeap::ShenandoahDegenPoint)c);
      out->print_cr("    " SIZE_FORMAT_W(5) " happened at %s",         _degen_points[c], desc);
    }
  }
  out->print_cr("  " SIZE_FORMAT_W(5) " upgraded to Full GC",          _alloc_failure_degenerated_upgrade_to_full);
  out->cr();

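  // Upgraded Degenerated GCs complete as Full GCs, so they are included in
  // the Full GC total below.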
  out->print_cr(SIZE_FORMAT_W(5) " Full GCs",                          _success_full_gcs + _alloc_failure_degenerated_upgrade_to_full);
  out->print_cr("  " SIZE_FORMAT_W(5) " invoked explicitly",           _explicit_full);
  out->print_cr("  " SIZE_FORMAT_W(5) " caused by allocation failure", _alloc_failure_full);
  out->print_cr("  " SIZE_FORMAT_W(5) " upgraded from Degenerated GC", _alloc_failure_degenerated_upgrade_to_full);
}