/*
 * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "gc/shared/gcPolicyCounters.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahPartialGC.hpp"
#include "runtime/os.hpp"

class ShenandoahHeuristics : public CHeapObj<mtGC> {

  NumberSeq _allocation_rate_bytes;
  NumberSeq _reclamation_rate_bytes;

  size_t _bytes_allocated_since_CM;
  size_t _bytes_reclaimed_this_cycle;

protected:
  bool _update_refs_early;
  bool _update_refs_adaptive;

  typedef struct {
    ShenandoahHeapRegion* _region;
    size_t _garbage;
  } RegionData;

  static int compare_by_garbage(RegionData a, RegionData b) {
    if (a._garbage > b._garbage)
      return -1;
    else if (a._garbage < b._garbage)
      return 1;
    else return 0;
  }

  static int compare_by_alloc_seq_ascending(ShenandoahHeapRegion* a, ShenandoahHeapRegion* b) {
    if (a->last_alloc_seq_num() == b->last_alloc_seq_num())
      return 0;
    else if (a->last_alloc_seq_num() < b->last_alloc_seq_num())
      return -1;
    else return 1;
  }

  static int compare_by_alloc_seq_descending(ShenandoahHeapRegion* a, ShenandoahHeapRegion* b) {
    return -compare_by_alloc_seq_ascending(a, b);
  }

  RegionData* get_region_data_cache(size_t num) {
    RegionData* res = _region_data;
    if (res == NULL) {
      res = NEW_C_HEAP_ARRAY(RegionData, num, mtGC);
      _region_data = res;
      _region_data_size = num;
    } else if (_region_data_size < num) {
      res = REALLOC_C_HEAP_ARRAY(RegionData, _region_data, num, mtGC);
      _region_data = res;
      _region_data_size = num;
    }
    return res;
  }

  RegionData* _region_data;
  size_t _region_data_size;

  typedef struct {
    ShenandoahHeapRegion* _region;
    size_t _connections;
  } RegionConnections;

  RegionConnections* get_region_connects_cache(size_t num) {
    RegionConnections* res = _region_connects;
    if (res == NULL) {
      res = NEW_C_HEAP_ARRAY(RegionConnections, num, mtGC);
      _region_connects = res;
      _region_connects_size = num;
    } else if (_region_connects_size < num) {
      res = REALLOC_C_HEAP_ARRAY(RegionConnections, _region_connects, num, mtGC);
      _region_connects = res;
      _region_connects_size = num;
    }
    return res;
  }

  static int compare_by_connects(RegionConnections a, RegionConnections b) {
    if (a._connections == b._connections)
      return 0;
    else if (a._connections < b._connections)
      return -1;
    else return 1;
  }

  RegionConnections* _region_connects;
  size_t _region_connects_size;

  size_t _bytes_allocated_start_CM;
  size_t _bytes_allocated_during_CM;

  uint _cancelled_cm_cycles_in_a_row;
  uint _successful_cm_cycles_in_a_row;

  uint _cancelled_uprefs_cycles_in_a_row;
  uint _successful_uprefs_cycles_in_a_row;

  size_t _bytes_in_cset;

  double _last_cycle_end;

public:

  ShenandoahHeuristics();
  virtual ~ShenandoahHeuristics();

  void record_bytes_allocated(size_t bytes);
  void record_bytes_reclaimed(size_t bytes);
  void record_bytes_start_CM(size_t bytes);
  void record_bytes_end_CM(size_t bytes);

  void record_gc_start() {
    ShenandoahHeap::heap()->set_alloc_seq_gc_start();
  }

  void record_gc_end() {
    ShenandoahHeap::heap()->set_alloc_seq_gc_end();
    ShenandoahHeap::heap()->set_used_at_last_gc();
  }

  virtual void record_cycle_start() {
    // Do nothing
  }

  virtual void record_cycle_end() {
    _last_cycle_end = os::elapsedTime();
  }

  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
    // Do nothing
  }

  size_t bytes_in_cset() const { return _bytes_in_cset; }

  virtual void print_thresholds() {
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const = 0;

  virtual bool should_start_update_refs() {
    return _update_refs_early;
  }

  virtual bool update_refs() const {
    return _update_refs_early;
  }

  virtual bool should_start_partial_gc() {
    return false;
  }

  virtual bool can_do_partial_gc() {
    return false;
  }

  virtual bool handover_cancelled_marking() {
    return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }

  virtual bool handover_cancelled_uprefs() {
    return _cancelled_uprefs_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }

  virtual void record_cm_cancelled() {
    _cancelled_cm_cycles_in_a_row++;
    _successful_cm_cycles_in_a_row = 0;
  }

  virtual void record_cm_success() {
    _cancelled_cm_cycles_in_a_row = 0;
    _successful_cm_cycles_in_a_row++;
  }

  virtual void record_uprefs_cancelled() {
    _cancelled_uprefs_cycles_in_a_row++;
    _successful_uprefs_cycles_in_a_row = 0;
  }

  virtual void record_uprefs_success() {
    _cancelled_uprefs_cycles_in_a_row = 0;
    _successful_uprefs_cycles_in_a_row++;
  }

  virtual void record_allocation_failure_gc() {
    _bytes_in_cset = 0;
  }

  virtual void record_user_requested_gc() {
    _bytes_in_cset = 0;
  }

  virtual void record_peak_occupancy() {
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
  virtual void choose_free_set(ShenandoahFreeSet* free_set);

  virtual bool process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Process references every Nth GC cycle.
    return cycle % ShenandoahRefProcFrequency == 0;
  }

  virtual bool unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Unload classes every Nth GC cycle.
    // This should not happen in the same cycle as process_references to amortize costs.
    // Offsetting by one is enough to break the rendezvous when periods are equal.
    // When periods are not equal, offsetting by one is just as good as any other guess.
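    // For illustration (assumed example values, not defaults): with
    // ShenandoahRefProcFrequency == ShenandoahUnloadClassesFrequency == 5,
    // references are processed on cycles 5, 10, 15, ..., while classes are
    // unloaded on cycles 4, 9, 14, ... -- the two never coincide.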
    return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
  }

  bool maybe_add_heap_region(ShenandoahHeapRegion* hr,
                             ShenandoahCollectionSet* cs);

  virtual const char* name() = 0;
  virtual bool is_diagnostic() = 0;
  virtual bool is_experimental() = 0;
  virtual void initialize() {}

protected:
  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                     RegionData* data, size_t data_size,
                                                     size_t trash, size_t free) = 0;
};

ShenandoahHeuristics::ShenandoahHeuristics() :
  _bytes_allocated_since_CM(0),
  _bytes_reclaimed_this_cycle(0),
  _update_refs_early(false),
  _update_refs_adaptive(false),
  _region_data(NULL),
  _region_data_size(0),
  _region_connects(NULL),
  _region_connects_size(0),
  _bytes_allocated_start_CM(0),
  _bytes_allocated_during_CM(0),
  _cancelled_cm_cycles_in_a_row(0),
  _successful_cm_cycles_in_a_row(0),
  _cancelled_uprefs_cycles_in_a_row(0),
  _successful_uprefs_cycles_in_a_row(0),
  _bytes_in_cset(0),
  _last_cycle_end(0)
{
  if (strcmp(ShenandoahUpdateRefsEarly, "on") == 0 ||
      strcmp(ShenandoahUpdateRefsEarly, "true") == 0 ) {
    _update_refs_early = true;
  } else if (strcmp(ShenandoahUpdateRefsEarly, "off") == 0 ||
             strcmp(ShenandoahUpdateRefsEarly, "false") == 0 ) {
    _update_refs_early = false;
  } else if (strcmp(ShenandoahUpdateRefsEarly, "adaptive") == 0) {
    _update_refs_adaptive = true;
    _update_refs_early = true;
  } else {
    vm_exit_during_initialization("Unknown -XX:ShenandoahUpdateRefsEarly option: %s", ShenandoahUpdateRefsEarly);
  }
}

ShenandoahHeuristics::~ShenandoahHeuristics() {
  if (_region_data != NULL) {
    FREE_C_HEAP_ARRAY(RegionData, _region_data);
  }
  if (_region_connects != NULL) {
    FREE_C_HEAP_ARRAY(RegionConnections, _region_connects);
  }
}

bool ShenandoahHeuristics::maybe_add_heap_region(ShenandoahHeapRegion* hr,
                                                 ShenandoahCollectionSet* collection_set) {
  if (hr->is_regular() && hr->has_live() && !collection_set->is_in(hr)) {
    collection_set->add_region(hr);
    return true;
  }
  return false;
}

void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
  assert(collection_set->count() == 0, "Must be empty");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Poll this before populating collection set.
  size_t total_garbage = heap->garbage();

  // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.

  ShenandoahHeapRegionSet* regions = heap->regions();
  size_t active = regions->active_regions();

  RegionData* candidates = get_region_data_cache(active);

  size_t cand_idx = 0;

  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;

  size_t free = 0;
  size_t free_regions = 0;

  for (size_t i = 0; i < active; i++) {
    ShenandoahHeapRegion* region = regions->get(i);

    if (region->is_empty()) {
      free_regions++;
      free += ShenandoahHeapRegion::region_size_bytes();
    } else if (region->is_regular()) {
      if (!region->has_live()) {
        // We can recycle it right away and put it in the free set.
        immediate_regions++;
        immediate_garbage += region->garbage();
        region->make_trash();
      } else {
        // This is our candidate for later consideration.
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._garbage = region->garbage();
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
      // Reclaim humongous regions here, and count them as immediate garbage
#ifdef ASSERT
      bool reg_live = region->has_live();
      bool bm_live = heap->is_marked(oop(region->bottom() + BrooksPointer::word_size()));
      assert(reg_live == bm_live,
             "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT,
             BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words());
#endif
      if (!region->has_live()) {
        size_t reclaimed = heap->trash_humongous_region_at(region);
        immediate_regions += reclaimed;
        immediate_garbage += reclaimed * ShenandoahHeapRegion::region_size_bytes();
      }
    }
  }

  // Step 2. Process the remaining candidates, if any.

  if (cand_idx > 0) {
    choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage, free);
  }

  // Step 3. Look back at collection set, and see if it's worth it to collect,
  // given the amount of immediately reclaimable garbage.
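  // A sketch with assumed numbers: if the scan above trashed 80 regions
  // outright and the cset holds 20 more, immediate_percent below is
  // 80 * 100 / (80 + 20) = 80. If that exceeds ShenandoahImmediateThreshold,
  // the trash alone pays for the cycle, and we skip evacuation by clearing
  // the collection set.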

  log_info(gc, ergo)("Total Garbage: " SIZE_FORMAT "M",
                     total_garbage / M);

  size_t total_garbage_regions = immediate_regions + collection_set->count();
  size_t immediate_percent = total_garbage_regions == 0 ? 0 : (immediate_regions * 100 / total_garbage_regions);

  log_info(gc, ergo)("Immediate Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT " regions (" SIZE_FORMAT "%% of total)",
                     immediate_garbage / M, immediate_regions, immediate_percent);

  if (immediate_percent > ShenandoahImmediateThreshold) {
    collection_set->clear();
  } else {
    log_info(gc, ergo)("Garbage to be collected: " SIZE_FORMAT "M (" SIZE_FORMAT "%% of total), " SIZE_FORMAT " regions",
                       collection_set->garbage() / M, collection_set->garbage() * 100 / MAX2(total_garbage, (size_t)1), collection_set->count());
    log_info(gc, ergo)("Live objects to be evacuated: " SIZE_FORMAT "M",
                       collection_set->live_data() / M);
    log_info(gc, ergo)("Live/garbage ratio in collected regions: " SIZE_FORMAT "%%",
                       collection_set->live_data() * 100 / MAX2(collection_set->garbage(), (size_t)1));
    log_info(gc, ergo)("Free: " SIZE_FORMAT "M, " SIZE_FORMAT " regions (" SIZE_FORMAT "%% of total)",
                       free / M, free_regions, free_regions * 100 / active);
  }

  collection_set->update_region_status();
}

void ShenandoahHeuristics::choose_free_set(ShenandoahFreeSet* free_set) {
  ShenandoahHeapRegionSet* ordered_regions = ShenandoahHeap::heap()->regions();
  for (size_t i = 0; i < ordered_regions->active_regions(); i++) {
    ShenandoahHeapRegion* region = ordered_regions->get(i);
    if (region->is_alloc_allowed()) {
      free_set->add_region(region);
    }
  }
}


void ShenandoahCollectorPolicy::record_gc_start() {
  _heuristics->record_gc_start();
}

void ShenandoahCollectorPolicy::record_gc_end() {
  _heuristics->record_gc_end();
}

void ShenandoahCollectorPolicy::report_concgc_cancelled() {
}

void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
  _bytes_allocated_since_CM = bytes;
  _bytes_allocated_start_CM = bytes;
  _allocation_rate_bytes.add(bytes);
}

void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
  _bytes_reclaimed_this_cycle = bytes;
  _reclamation_rate_bytes.add(bytes);
}

void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
  _bytes_allocated_start_CM = bytes;
}

void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
  _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)
                                                                   : bytes;
}

class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahPassiveHeuristics() : ShenandoahHeuristics() {
    // Do not allow concurrent cycles.
    FLAG_SET_DEFAULT(ExplicitGCInvokesConcurrent, false);
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t trash, size_t free) {
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > 0) {
        cset->add_region(r);
      }
    }
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    // Never do concurrent GCs.
    return false;
  }

  virtual bool process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    // Always process references.
    return true;
  }

  virtual bool unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    // Always unload classes.
    return true;
  }

  virtual const char* name() {
    return "passive";
  }

  virtual bool is_diagnostic() {
    return true;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahAggressiveHeuristics() : ShenandoahHeuristics() {
    // Do not shortcut evacuation
    if (FLAG_IS_DEFAULT(ShenandoahImmediateThreshold)) {
      FLAG_SET_DEFAULT(ShenandoahImmediateThreshold, 100);
    }
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t trash, size_t free) {
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > 0) {
        cset->add_region(r);
      }
    }
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    return true;
  }

  virtual bool process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    // Randomly process refs with 50% chance.
    return (os::random() & 1) == 1;
  }

  virtual bool unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    // Randomly unload classes with 50% chance.
    return (os::random() & 1) == 1;
  }

  virtual const char* name() {
    return "aggressive";
  }

  virtual bool is_diagnostic() {
    return true;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahDynamicHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahDynamicHeuristics() : ShenandoahHeuristics() {
  }

  void print_thresholds() {
    log_info(gc, init)("Shenandoah heuristics thresholds: allocation " SIZE_FORMAT ", free " SIZE_FORMAT ", garbage " SIZE_FORMAT,
                       ShenandoahAllocationThreshold,
                       ShenandoahFreeThreshold,
                       ShenandoahGarbageThreshold);
  }

  virtual ~ShenandoahDynamicHeuristics() {}

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {

    bool shouldStartConcurrentMark = false;

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t available = heap->free_regions()->available();

    if (! update_refs()) {
      // Count in the memory available after cset reclamation.
      size_t cset = MIN2(_bytes_in_cset, (ShenandoahCSetThreshold * capacity) / 100);
      available += cset;
    }

    uintx threshold = ShenandoahFreeThreshold + ShenandoahCSetThreshold;
    size_t targetStartMarking = (capacity * threshold) / 100;
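    // For example (assumed values): with a 1024M heap, ShenandoahFreeThreshold
    // of 10 and ShenandoahCSetThreshold of 10, marking starts once available
    // memory (plus the anticipated cset reclamation above) drops below
    // 1024M * (10 + 10) / 100 = ~205M, provided enough has been allocated.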

    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);

    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
    if (available < targetStartMarking &&
        heap->bytes_allocated_since_cm() > threshold_bytes_allocated) {
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      shouldStartConcurrentMark = true;
    } else if (periodic_gc) {
      log_info(gc,ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                        last_time_ms, ShenandoahGuaranteedGCInterval);
      shouldStartConcurrentMark = true;
    }

    return shouldStartConcurrentMark;
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t trash, size_t free) {
    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;

    _bytes_in_cset = 0;
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > threshold) {
        cset->add_region(r);
        _bytes_in_cset += r->used();
      }
    }
  }

  virtual const char* name() {
    return "dynamic";
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahContinuousHeuristics : public ShenandoahHeuristics {
public:
  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    // Start the cycle, unless completely idle.
    return ShenandoahHeap::heap()->bytes_allocated_since_cm() > 0;
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t trash, size_t free) {
    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > threshold) {
        cset->add_region(r);
      }
    }
  }

  virtual const char* name() {
    return "continuous";
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
private:
  uintx _free_threshold;
  TruncatedSeq* _cset_history;
  size_t _peak_occupancy;
  TruncatedSeq* _cycle_gap_history;
  TruncatedSeq* _conc_mark_duration_history;
  TruncatedSeq* _conc_uprefs_duration_history;
public:
  ShenandoahAdaptiveHeuristics() :
    ShenandoahHeuristics(),
    _free_threshold(ShenandoahInitFreeThreshold),
    _cset_history(new TruncatedSeq((uint)ShenandoahHappyCyclesThreshold)),
    _peak_occupancy(0),
    _cycle_gap_history(new TruncatedSeq(5)),
    _conc_mark_duration_history(new TruncatedSeq(5)),
    _conc_uprefs_duration_history(new TruncatedSeq(5)) {

    _cset_history->add((double) ShenandoahCSetThreshold);
    _cset_history->add((double) ShenandoahCSetThreshold);
  }

  virtual ~ShenandoahAdaptiveHeuristics() {
    delete _cset_history;
    delete _cycle_gap_history;
    delete _conc_mark_duration_history;
    delete _conc_uprefs_duration_history;
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t trash, size_t free) {
    size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;

    // The logic for cset selection in adaptive is as follows:
    //
    //   1. We cannot get a cset larger than the available free space. Otherwise we guarantee OOME
    //      during evacuation, and thus guarantee full GC. In practice, we also want to let the
    //      application allocate something. This is why we limit the cset to some fraction of the
    //      available space. In a non-overloaded heap, max_cset would contain all plausible candidates
    //      over the garbage threshold.
    //
    //   2. We should not make the cset too small, otherwise the free threshold would not be met right
    //      after the cycle, and we would get back-to-back cycles for no reason if the heap is
    //      too fragmented. In a non-overloaded, non-fragmented heap min_cset would be around zero.
    //
    // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates
    // until we reach min_cset. Then we add all candidates that fit the garbage threshold until
    // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme,
    // ShenandoahGarbageThreshold is the soft threshold which would be ignored until min_cset is hit.
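    //
    // A worked sketch with assumed numbers: on a 1000M heap with
    // _free_threshold == 10, free_target = (10 + MaxNormalStep) percent of 1000M = 150M.
    // If actual_free (free + trash) is 100M, then min_cset = 150M - 100M = 50M
    // and max_cset = 100M * 3 / 4 = 75M: the first ~50M of live data is taken
    // unconditionally, further regions must pass the garbage threshold, and
    // selection stops once evacuating more live data would exceed 75M.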

    size_t free_target = MIN2<size_t>(_free_threshold + MaxNormalStep, 100) * ShenandoahHeap::heap()->capacity() / 100;
    size_t actual_free = free + trash;
    size_t min_cset = free_target > actual_free ? (free_target - actual_free) : 0;
    size_t max_cset = actual_free * 3 / 4;
    min_cset = MIN2(min_cset, max_cset);

    log_info(gc, ergo)("Adaptive CSet selection: free target = " SIZE_FORMAT "M, actual free = "
                               SIZE_FORMAT "M; min cset = " SIZE_FORMAT "M, max cset = " SIZE_FORMAT "M",
                       free_target / M, actual_free / M, min_cset / M, max_cset / M);

    // Prefer the regions with the most garbage first.
    QuickSort::sort<RegionData>(data, (int)size, compare_by_garbage, false);

    size_t live_cset = 0;
    _bytes_in_cset = 0;
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;

      size_t new_cset = live_cset + r->get_live_data_bytes();

      if (new_cset < min_cset) {
        cset->add_region(r);
        _bytes_in_cset += r->used();
        live_cset = new_cset;
      } else if (new_cset <= max_cset) {
        if (r->garbage() > garbage_threshold) {
          cset->add_region(r);
          _bytes_in_cset += r->used();
          live_cset = new_cset;
        }
      } else {
        break;
      }
    }
  }

  static const intx MaxNormalStep = 5;      // max step towards goal under normal conditions
  static const intx CancelledGC_Hit = 10;   // how much to step on cancelled GC
  static const intx AllocFailure_Hit = 20;  // how much to step on allocation failure full GC
  static const intx UserRequested_Hit = 0;  // how much to step on user requested full GC

  void handle_cycle_success() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t capacity = heap->capacity();

    size_t current_threshold = (capacity - _peak_occupancy) * 100 / capacity;
    size_t min_threshold = ShenandoahMinFreeThreshold;
    intx step = min_threshold - current_threshold;
    step = MAX2(step, (intx) -MaxNormalStep);
    step = MIN2(step, (intx) MaxNormalStep);
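    // Example with assumed numbers: capacity 1000M and peak occupancy 950M give
    // current_threshold == 5; with ShenandoahMinFreeThreshold == 10 the step is
    // +5 (pessimize: raise the free threshold). A peak of 800M gives
    // current_threshold == 20 and a raw step of -10, clamped to -MaxNormalStep
    // = -5 (optimize, but only after enough happy cycles, see below).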

    log_info(gc, ergo)("Capacity: " SIZE_FORMAT "M, Peak Occupancy: " SIZE_FORMAT
                              "M, Lowest Free: " SIZE_FORMAT "M, Free Threshold: " UINTX_FORMAT "M",
                       capacity / M, _peak_occupancy / M,
                       (capacity - _peak_occupancy) / M, ShenandoahMinFreeThreshold * capacity / 100 / M);

    if (step > 0) {
      // Pessimize
      adjust_free_threshold(step);
    } else if (step < 0) {
      // Optimize, if enough happy cycles happened
      if (_successful_cm_cycles_in_a_row > ShenandoahHappyCyclesThreshold &&
          (! update_refs() || (_successful_uprefs_cycles_in_a_row > ShenandoahHappyCyclesThreshold)) &&
          _free_threshold > 0) {
        adjust_free_threshold(step);
        _successful_cm_cycles_in_a_row = 0;
        _successful_uprefs_cycles_in_a_row = 0;
      }
    } else {
      // do nothing
    }
    _peak_occupancy = 0;
  }

  void record_cycle_end() {
    ShenandoahHeuristics::record_cycle_end();
    handle_cycle_success();
  }

  void record_cycle_start() {
    ShenandoahHeuristics::record_cycle_start();
    double last_cycle_gap = (os::elapsedTime() - _last_cycle_end);
    _cycle_gap_history->add(last_cycle_gap);
  }

  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
    if (phase == ShenandoahPhaseTimings::conc_mark) {
      _conc_mark_duration_history->add(secs);
    } else if (phase == ShenandoahPhaseTimings::conc_update_refs) {
      _conc_uprefs_duration_history->add(secs);
    } // Else ignore
  }

  void adjust_free_threshold(intx adj) {
    intx new_value = adj + _free_threshold;
    uintx new_threshold = (uintx)MAX2<intx>(new_value, 0);
    new_threshold = MAX2(new_threshold, ShenandoahMinFreeThreshold);
    new_threshold = MIN2(new_threshold, ShenandoahMaxFreeThreshold);
    if (new_threshold != _free_threshold) {
      _free_threshold = new_threshold;
      log_info(gc,ergo)("Adjusting free threshold to: " UINTX_FORMAT "%% (" SIZE_FORMAT "M)",
                        _free_threshold, _free_threshold * ShenandoahHeap::heap()->capacity() / 100 / M);
    }
  }

  virtual void record_cm_cancelled() {
    ShenandoahHeuristics::record_cm_cancelled();
    adjust_free_threshold(CancelledGC_Hit);
  }

  virtual void record_cm_success() {
    ShenandoahHeuristics::record_cm_success();
  }

  virtual void record_uprefs_cancelled() {
    ShenandoahHeuristics::record_uprefs_cancelled();
    adjust_free_threshold(CancelledGC_Hit);
  }

  virtual void record_uprefs_success() {
    ShenandoahHeuristics::record_uprefs_success();
  }

  virtual void record_user_requested_gc() {
    ShenandoahHeuristics::record_user_requested_gc();
    adjust_free_threshold(UserRequested_Hit);
  }

  virtual void record_allocation_failure_gc() {
    ShenandoahHeuristics::record_allocation_failure_gc();
    adjust_free_threshold(AllocFailure_Hit);
  }

  virtual void record_peak_occupancy() {
    _peak_occupancy = MAX2(_peak_occupancy, ShenandoahHeap::heap()->used());
  }

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    if (! ShenandoahConcMarkGC) return false;
    bool shouldStartConcurrentMark = false;
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t available = heap->free_regions()->available();
    uintx factor = _free_threshold;
    size_t cset_threshold = 0;
    if (! update_refs()) {
      // Count in the memory available after cset reclamation.
      cset_threshold = (size_t) _cset_history->davg();
      size_t cset = MIN2(_bytes_in_cset, (cset_threshold * capacity) / 100);
      available += cset;
      factor += cset_threshold;
    }
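    // Sketch with assumed numbers: capacity 1000M, _free_threshold == 10 and a
    // decayed-average cset history of 5 give factor == 15, i.e. marking triggers
    // once projected free memory (available plus anticipated cset reclamation)
    // falls below 1000M * 15 / 100 = 150M and enough has been allocated since
    // the last mark.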

    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
    size_t threshold_available = (capacity * factor) / 100;
    size_t bytes_allocated = heap->bytes_allocated_since_cm();
    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;

    if (available < threshold_available &&
        bytes_allocated > threshold_bytes_allocated) {
      log_info(gc,ergo)("Concurrent marking triggered. Free: " SIZE_FORMAT "M, Free Threshold: " SIZE_FORMAT
                                "M; Allocated: " SIZE_FORMAT "M, Alloc Threshold: " SIZE_FORMAT "M",
                        available / M, threshold_available / M, bytes_allocated / M, threshold_bytes_allocated / M);
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      shouldStartConcurrentMark = true;
    } else if (periodic_gc) {
      log_info(gc,ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
          last_time_ms, ShenandoahGuaranteedGCInterval);
      shouldStartConcurrentMark = true;
    }

    if (shouldStartConcurrentMark) {
      if (! update_refs()) {
        log_info(gc,ergo)("Predicted cset threshold: " SIZE_FORMAT ", " SIZE_FORMAT "K CSet (" SIZE_FORMAT "%%)",
                          cset_threshold, _bytes_in_cset / K, _bytes_in_cset * 100 / capacity);
        _cset_history->add((double) (_bytes_in_cset * 100 / capacity));
      }
    }
    return shouldStartConcurrentMark;
  }

  virtual bool should_start_update_refs() {
    if (! _update_refs_adaptive) {
      return _update_refs_early;
    }

    double cycle_gap_avg = _cycle_gap_history->avg();
    double conc_mark_avg = _conc_mark_duration_history->avg();
    double conc_uprefs_avg = _conc_uprefs_duration_history->avg();

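    // Sketch with assumed flag values: with a 10s average cycle gap and
    // ShenandoahMergeUpdateRefsMinGap == 40, merged ("early") update-refs is
    // abandoned once conc_mark_avg + conc_uprefs_avg exceeds 10s * 0.40 = 4s;
    // with ShenandoahMergeUpdateRefsMaxGap == 30 it is re-enabled once the sum
    // drops below 3s. Distinct enter/leave thresholds give the switch hysteresis.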
    if (_update_refs_early) {
      double threshold = ShenandoahMergeUpdateRefsMinGap / 100.0;
      if (conc_mark_avg + conc_uprefs_avg > cycle_gap_avg * threshold) {
        _update_refs_early = false;
      }
    } else {
      double threshold = ShenandoahMergeUpdateRefsMaxGap / 100.0;
      if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) {
        _update_refs_early = true;
      }
    }
    return _update_refs_early;
  }

  virtual const char* name() {
    return "adaptive";
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return false;
  }
};

class ShenandoahPartialHeuristics : public ShenandoahAdaptiveHeuristics {
protected:
  size_t* _from_idxs;

public:
  ShenandoahPartialHeuristics() : ShenandoahAdaptiveHeuristics(), _from_idxs(NULL) {
    FLAG_SET_DEFAULT(UseShenandoahMatrix, true);

    // Set up special barriers for concurrent partial GC.
    FLAG_SET_DEFAULT(ShenandoahConditionalSATBBarrier, true);
    FLAG_SET_DEFAULT(ShenandoahSATBBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahStoreValWriteBarrier, true);
    FLAG_SET_DEFAULT(ShenandoahStoreValReadBarrier, false);
    FLAG_SET_DEFAULT(ShenandoahAsmWB, false);

    if (FLAG_IS_DEFAULT(ShenandoahRefProcFrequency)) {
      FLAG_SET_DEFAULT(ShenandoahRefProcFrequency, 1);
    }
    // TODO: Disable this optimization for now, as it also requires the matrix barriers.
#ifdef COMPILER2
    FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
#endif
  }

  void initialize() {
    _from_idxs = NEW_C_HEAP_ARRAY(size_t, ShenandoahHeap::heap()->num_regions(), mtGC);
  }

  virtual ~ShenandoahPartialHeuristics() {
    // _from_idxs is only allocated in initialize(), which may not have run.
    if (_from_idxs != NULL) {
      FREE_C_HEAP_ARRAY(size_t, _from_idxs);
    }
  }

  bool should_start_update_refs() {
    return true;
  }

  bool update_refs() const {
    return true;
  }

  bool can_do_partial_gc() {
    return true;
  }

  bool should_start_concurrent_mark(size_t used, size_t capacity) const {
    return false;
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return true;
  }

  virtual bool should_start_partial_gc() = 0;
  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) = 0;

};

class ShenandoahPartialConnectedHeuristics : public ShenandoahPartialHeuristics {
public:
  virtual const char* name() {
    return "connectedness";
  }

  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->need_update_refs()) {
      // Cannot start partial if heap is not completely updated.
      return false;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return false;
    }

    size_t active    = heap->regions()->active_regions() * ShenandoahHeapRegion::region_size_bytes();
    size_t threshold = active * ShenandoahConnectednessPercentage / 100;
    size_t allocated = used - prev_used;
    bool result = allocated > threshold;
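    // Example with assumed numbers: 100 active regions of 1M each (active ==
    // 100M) and ShenandoahConnectednessPercentage == 10 give a 10M threshold,
    // so a partial GC triggers once more than 10M has been allocated since
    // the last GC.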

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
                       result ? "Partial GC triggered" : "Partial GC skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M);

    if (result) {
      log_info(gc,ergo)("%s", msg.buffer());
    } else {
      log_trace(gc,ergo)("%s", msg.buffer());
    }
    return result;
  }

  void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    ShenandoahHeapRegionSet* regions = heap->regions();
    size_t num_regions = heap->num_regions();

    RegionConnections* connects = get_region_connects_cache(num_regions);
    size_t connect_cnt = 0;

    for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
      ShenandoahHeapRegion* region = regions->get(to_idx);
      region->set_root(false);
      if (!region->is_regular()) continue;

      uint count = matrix->count_connected_to(to_idx, num_regions);
      if (count < ShenandoahPartialInboundThreshold) {
        connects[connect_cnt]._region = region;
        connects[connect_cnt]._connections = count;
        connect_cnt++;
      }
    }

    QuickSort::sort<RegionConnections>(connects, (int)connect_cnt, compare_by_connects, false);

    // The heuristics triggered this partial GC because allocated bytes exceeded a threshold.
    // New allocations might have happened while we were preparing for GC, so capture all of
    // them in this cycle. This "adjusts" the threshold automatically.
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN3(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions, connect_cnt);

    for (size_t c = 0; c < target; c++) {
      assert (c == 0 || connects[c]._connections >= connects[c-1]._connections, "monotonicity");

      ShenandoahHeapRegion* region = connects[c]._region;
      size_t to_idx = region->region_number();
      assert(region->is_regular(), "filtered before");
      assert(! heap->region_in_collection_set(to_idx), "must not be in cset yet");

      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(to_idx, num_regions,
                                         _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        maybe_add_heap_region(region, collection_set);
        for (size_t i = 0; i < from_idx_count; i++) {
          ShenandoahHeapRegion* r = regions->get(_from_idxs[i]);
          if (!r->is_root()) {
            r->set_root(true);
          }
        }
      }
    }

    collection_set->update_region_status();
  }
};

class ShenandoahGenerationalPartialHeuristics : public ShenandoahPartialHeuristics {
public:

  ShenandoahGenerationalPartialHeuristics() : ShenandoahPartialHeuristics() {
    if (FLAG_IS_DEFAULT(ShenandoahPartialInboundThreshold)) {
      FLAG_SET_DEFAULT(ShenandoahPartialInboundThreshold, 100);
    }
  }

  virtual const char* name() {
    return "generational";
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    uint64_t alloc_seq_at_last_gc_end = heap->alloc_seq_at_last_gc_end();

    ShenandoahHeapRegionSet* regions = heap->regions();
    size_t active = regions->active_regions();
    ShenandoahHeapRegionSet sorted_regions(active);

    for (size_t i = 0; i < active; i++) {
      sorted_regions.add_region(regions->get(i));
    }

    sorted_regions.sort(compare_by_alloc_seq_descending);

    // The heuristics triggered this partial GC because allocated bytes exceeded a threshold.
    // New allocations might have happened while we were preparing for GC, so capture all of
    // them in this cycle. This "adjusts" the threshold automatically.
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), sorted_regions.active_regions());

    for (uint to_idx = 0; to_idx < active; to_idx++) {
      ShenandoahHeapRegion* region = regions->get(to_idx);
      region->set_root(false);
    }

    uint count = 0;
    size_t sorted_active = sorted_regions.active_regions();

    for (uint i = 0; (i < sorted_active) && (count < target); i++) {
      ShenandoahHeapRegion* contender = sorted_regions.get(i);
      if (contender->last_alloc_seq_num() <= alloc_seq_at_last_gc_end) {
        break;
      }

      size_t index = contender->region_number();
      size_t num_regions = heap->num_regions();
      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(index, num_regions, _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        if (maybe_add_heap_region(contender, collection_set)) {
          count++;
        }

        for (uint f = 0; f < from_idx_count; f++) {
          ShenandoahHeapRegion* r = regions->get(_from_idxs[f]);
          if (!r->is_root()) {
            r->set_root(true);
          }
        }
      }
    }
    collection_set->update_region_status();

    log_info(gc,ergo)("Regions: Active: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
                      active, target, ShenandoahGenerationalYoungGenPercentage, collection_set->count());
  }

  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->need_update_refs()) {
      // Cannot start partial if heap is not completely updated.
      return false;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return false;
    }

    size_t active    = heap->regions()->active_regions() * ShenandoahHeapRegion::region_size_bytes();
    size_t threshold = active * ShenandoahGenerationalYoungGenPercentage / 100;
    size_t allocated = used - prev_used;

    // Start the next young gc after we've allocated percentage_young of the heap.
    bool result = allocated > threshold;

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
                       result ? "Partial GC triggered" : "Partial GC skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M);

    if (result) {
      log_info(gc,ergo)("%s", msg.buffer());
    } else {
      log_trace(gc,ergo)("%s", msg.buffer());
    }
    return result;
  }
};

class ShenandoahLRUPartialHeuristics : public ShenandoahPartialHeuristics {
public:
  ShenandoahLRUPartialHeuristics() : ShenandoahPartialHeuristics() {
    if (FLAG_IS_DEFAULT(ShenandoahPartialInboundThreshold)) {
      FLAG_SET_DEFAULT(ShenandoahPartialInboundThreshold, 100);
    }
  }

  virtual const char* name() {
    return "LRU";
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start();

    ShenandoahHeapRegionSet* regions = ShenandoahHeap::heap()->regions();
    size_t active = regions->active_regions();
    ShenandoahHeapRegionSet sorted_regions(active);

    for (size_t i = 0; i < active; i++) {
      ShenandoahHeapRegion* r = regions->get(i);
      if (r->is_regular() && (r->last_alloc_seq_num() > 0)) {
        sorted_regions.add_region(regions->get(i));
      }
    }

    sorted_regions.sort(compare_by_alloc_seq_ascending);

    // The heuristics triggered this partial GC because allocated bytes exceeded a threshold.
    // New allocations might have happened while we were preparing for GC, so capture all of
    // them in this cycle. This "adjusts" the threshold automatically.
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), sorted_regions.active_regions());

    for (uint to_idx = 0; to_idx < active; to_idx++) {
      ShenandoahHeapRegion* region = regions->get(to_idx);
      region->set_root(false);
    }
    uint count = 0;
    size_t sorted_active = sorted_regions.active_regions();

    for (uint i = 0; (i < sorted_active) && (count < target); i++) {
      ShenandoahHeapRegion* contender = sorted_regions.get(i);
      if (contender->last_alloc_seq_num() >= alloc_seq_at_last_gc_start) {
        break;
      }

      size_t index = contender->region_number();
      size_t num_regions = heap->num_regions();
      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(index, num_regions, _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        if (maybe_add_heap_region(contender, collection_set)) {
          count++;
        }
        for (uint f = 0; f < from_idx_count; f++) {
          ShenandoahHeapRegion* r = regions->get(_from_idxs[f]);
          if (!r->is_root()) {
            r->set_root(true);
          }
        }
      }
    }
    collection_set->update_region_status();

    log_info(gc,ergo)("Regions: Active: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
                      active, target, ShenandoahLRUOldGenPercentage, collection_set->count());
  }

  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->need_update_refs()) {
      // Cannot start partial if heap is not completely updated.
      return false;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return false;
    }

    // For now, don't start until we are 40% full.
    size_t allocated = used - prev_used;
    size_t active    = heap->regions()->active_regions() * ShenandoahHeapRegion::region_size_bytes();
    size_t threshold = active * ShenandoahLRUOldGenPercentage / 100;
    size_t minimum   = active * 40 / 100;

    bool result = ((used > minimum) && (allocated > threshold));

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M, Minimum: " SIZE_FORMAT "M",
                       result ? "Partial GC triggered" : "Partial GC skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M, minimum/M);

    if (result) {
      log_info(gc,ergo)("%s", msg.buffer());
    } else {
      log_trace(gc,ergo)("%s", msg.buffer());
    }
    return result;
  }
};


ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
  _cycle_counter(0),
  _successful_cm(0),
  _degenerated_cm(0),
  _successful_uprefs(0),
  _degenerated_uprefs(0),
  _in_shutdown(0)
{

  ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());

  initialize_all();

  _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
  _user_requested_gcs = 0;
  _allocation_failure_gcs = 0;

  if (ShenandoahGCHeuristics != NULL) {
    _minor_heuristics = NULL;
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      _heuristics = new ShenandoahAggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
      _heuristics = new ShenandoahDynamicHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      _heuristics = new ShenandoahPassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "continuous") == 0) {
      _heuristics = new ShenandoahContinuousHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "connected") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahPartialConnectedHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "generational") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahGenerationalPartialHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "LRU") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahLRUPartialHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }

    if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _heuristics->name()));
    }
    if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _heuristics->name()));
    }
    if (_minor_heuristics != NULL && _minor_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _minor_heuristics->name()));
    }
    if (_minor_heuristics != NULL && _minor_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _minor_heuristics->name()));
    }

    if (ShenandoahConditionalSATBBarrier && ShenandoahSATBBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahSATBBarrier and ShenandoahConditionalSATBBarrier");
    }
    if (ShenandoahStoreValWriteBarrier && ShenandoahStoreValReadBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValWriteBarrier and ShenandoahStoreValReadBarrier");
    }

    if (_minor_heuristics != NULL) {
      log_info(gc, init)("Shenandoah heuristics: %s minor with %s major",
                         _minor_heuristics->name(), _heuristics->name());
    } else {
      log_info(gc, init)("Shenandoah heuristics: %s",
                         _heuristics->name());
    }
    _heuristics->print_thresholds();
  } else {
    ShouldNotReachHere();
  }
}

ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
  return this;
}

BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
  return BarrierSet::ShenandoahBarrierSet;
}

HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
                                                       bool is_tlab,
                                                       bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}

void ShenandoahCollectorPolicy::initialize_alignments() {

  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
  _space_alignment = ShenandoahHeapRegion::region_size_bytes();
  _heap_alignment = ShenandoahHeapRegion::region_size_bytes();
}

void ShenandoahCollectorPolicy::post_heap_initialize() {
  _heuristics->initialize();
  if (_minor_heuristics != NULL) {
    _minor_heuristics->initialize();
  }
}

void ShenandoahCollectorPolicy::record_bytes_allocated(size_t bytes) {
  _heuristics->record_bytes_allocated(bytes);
}

void ShenandoahCollectorPolicy::record_bytes_start_CM(size_t bytes) {
  _heuristics->record_bytes_start_CM(bytes);
}

void ShenandoahCollectorPolicy::record_bytes_end_CM(size_t bytes) {
  _heuristics->record_bytes_end_CM(bytes);
}

void ShenandoahCollectorPolicy::record_bytes_reclaimed(size_t bytes) {
  _heuristics->record_bytes_reclaimed(bytes);
}

void ShenandoahCollectorPolicy::record_user_requested_gc() {
  _heuristics->record_user_requested_gc();
  _user_requested_gcs++;
}

void ShenandoahCollectorPolicy::record_allocation_failure_gc() {
  _heuristics->record_allocation_failure_gc();
  _allocation_failure_gcs++;
}

bool ShenandoahCollectorPolicy::should_start_concurrent_mark(size_t used,
                                                             size_t capacity) {
  return _heuristics->should_start_concurrent_mark(used, capacity);
}

bool ShenandoahCollectorPolicy::handover_cancelled_marking() {
  return _heuristics->handover_cancelled_marking();
}

bool ShenandoahCollectorPolicy::handover_cancelled_uprefs() {
  return _heuristics->handover_cancelled_uprefs();
}

bool ShenandoahCollectorPolicy::update_refs() {
  if (_minor_heuristics != NULL && _minor_heuristics->update_refs()) {
    return true;
  }
  return _heuristics->update_refs();
}

bool ShenandoahCollectorPolicy::should_start_update_refs() {
  if (_minor_heuristics != NULL && _minor_heuristics->should_start_update_refs()) {
    return true;
  }
  return _heuristics->should_start_update_refs();
}

void ShenandoahCollectorPolicy::record_cm_success() {
  _heuristics->record_cm_success();
  _successful_cm++;
}

void ShenandoahCollectorPolicy::record_cm_degenerated() {
  _degenerated_cm++;
}

void ShenandoahCollectorPolicy::record_cm_cancelled() {
  _heuristics->record_cm_cancelled();
}

void ShenandoahCollectorPolicy::record_uprefs_success() {
  _heuristics->record_uprefs_success();
  _successful_uprefs++;
}

void ShenandoahCollectorPolicy::record_uprefs_degenerated() {
  _degenerated_uprefs++;
}

void ShenandoahCollectorPolicy::record_uprefs_cancelled() {
  _heuristics->record_uprefs_cancelled();
}

void ShenandoahCollectorPolicy::record_peak_occupancy() {
  _heuristics->record_peak_occupancy();
}

void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set,
                                                      bool minor) {
  if (minor) {
    _minor_heuristics->choose_collection_set(collection_set);
  } else {
    _heuristics->choose_collection_set(collection_set);
  }
}

void ShenandoahCollectorPolicy::choose_free_set(ShenandoahFreeSet* free_set) {
  _heuristics->choose_free_set(free_set);
}


bool ShenandoahCollectorPolicy::process_references() {
  return _heuristics->process_references();
}

bool ShenandoahCollectorPolicy::unload_classes() {
  return _heuristics->unload_classes();
}

size_t ShenandoahCollectorPolicy::cycle_counter() const {
  return _cycle_counter;
}

void ShenandoahCollectorPolicy::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
  _heuristics->record_phase_time(phase, secs);
}

bool ShenandoahCollectorPolicy::should_start_partial_gc() {
  if (_minor_heuristics != NULL) {
    return _minor_heuristics->should_start_partial_gc();
  } else {
    return false; // no minor heuristics -> no partial gc
  }
}

bool ShenandoahCollectorPolicy::can_do_partial_gc() {
  if (_minor_heuristics != NULL) {
    return _minor_heuristics->can_do_partial_gc();
  } else {
    return false; // no minor heuristics -> no partial gc
  }
}

void ShenandoahCollectorPolicy::record_cycle_start() {
  _cycle_counter++;
  _heuristics->record_cycle_start();
}

void ShenandoahCollectorPolicy::record_cycle_end() {
  _heuristics->record_cycle_end();
}

void ShenandoahCollectorPolicy::record_shutdown() {
  OrderAccess::release_store_fence(&_in_shutdown, 1);
}

bool ShenandoahCollectorPolicy::is_at_shutdown() {
  return OrderAccess::load_acquire(&_in_shutdown) == 1;
}

void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const {
  out->print_cr("" SIZE_FORMAT " allocation failure and " SIZE_FORMAT " user requested GCs", _allocation_failure_gcs, _user_requested_gcs);
  out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated concurrent markings", _successful_cm, _degenerated_cm);
  out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated update references  ", _successful_uprefs, _degenerated_uprefs);
}