1 /*
   2  * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "gc/shared/gcPolicyCounters.hpp"
  26 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  27 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
  28 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  29 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  30 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  31 #include "gc/shenandoah/shenandoahPartialGC.hpp"
  32 #include "runtime/os.hpp"
  33 
// Base class for Shenandoah GC heuristics.
//
// A heuristics object decides when to start concurrent cycles, which regions
// go into the collection set, and keeps bookkeeping about cycle outcomes
// (successful vs. cancelled) that subclasses feed back into their decisions.
// Instances are allocated on the C heap under the GC memory tag.
class ShenandoahHeuristics : public CHeapObj<mtGC> {

  // Histories of sampled allocation/reclamation volumes, fed by
  // record_bytes_allocated()/record_bytes_reclaimed().
  NumberSeq _allocation_rate_bytes;
  NumberSeq _reclamation_rate_bytes;

  size_t _bytes_allocated_since_CM;    // last sampled allocation counter
  size_t _bytes_reclaimed_this_cycle;  // last sampled reclamation volume

protected:
  // Update-refs mode, parsed from -XX:ShenandoahUpdateRefsEarly in the
  // constructor. "adaptive" sets both flags; _update_refs_early may then
  // be changed at runtime by subclasses.
  bool _update_refs_early;
  bool _update_refs_adaptive;

  // Per-region candidate record used during collection set selection.
  typedef struct {
    ShenandoahHeapRegion* _region;
    size_t _garbage;
  } RegionData;

  // Sort helper: orders candidates by garbage, descending (most garbage first).
  static int compare_by_garbage(RegionData a, RegionData b) {
    if (a._garbage > b._garbage)
      return -1;
    else if (a._garbage < b._garbage)
      return 1;
    else return 0;
  }

  // Sort helper: orders regions by last allocation sequence number, ascending.
  static int compare_by_alloc_seq_ascending(ShenandoahHeapRegion* a, ShenandoahHeapRegion* b) {
    if (a->last_alloc_seq_num() == b->last_alloc_seq_num())
      return 0;
    else if (a->last_alloc_seq_num() < b->last_alloc_seq_num())
      return -1;
    else return 1;
  }

  // Sort helper: same key as above, descending.
  static int compare_by_alloc_seq_descending(ShenandoahHeapRegion* a, ShenandoahHeapRegion* b) {
    return -compare_by_alloc_seq_ascending(a, b);
  }

  // Returns a scratch array with room for at least 'num' RegionData entries.
  // The cache grows on demand, never shrinks, and is freed in the destructor.
  RegionData* get_region_data_cache(size_t num) {
    RegionData* res = _region_data;
    if (res == NULL) {
      res = NEW_C_HEAP_ARRAY(RegionData, num, mtGC);
      _region_data = res;
      _region_data_size = num;
    } else if (_region_data_size < num) {
      res = REALLOC_C_HEAP_ARRAY(RegionData, _region_data, num, mtGC);
      _region_data = res;
      _region_data_size = num;
    }
    return res;
  }

  RegionData* _region_data;       // lazily-allocated scratch cache
  size_t _region_data_size;       // current capacity of _region_data

  // Per-region record of connection-matrix counts (see
  // shenandoahConnectionMatrix.hpp for the matrix itself).
  typedef struct {
    ShenandoahHeapRegion* _region;
    size_t _connections;
  } RegionConnections;

  // Scratch cache for RegionConnections, same growth policy as
  // get_region_data_cache().
  RegionConnections* get_region_connects_cache(size_t num) {
    RegionConnections* res = _region_connects;
    if (res == NULL) {
      res = NEW_C_HEAP_ARRAY(RegionConnections, num, mtGC);
      _region_connects = res;
      _region_connects_size = num;
    } else if (_region_connects_size < num) {
      res = REALLOC_C_HEAP_ARRAY(RegionConnections, _region_connects, num, mtGC);
      _region_connects = res;
      _region_connects_size = num;
    }
    return res;
  }

  // Sort helper: orders by connection count, ascending.
  static int compare_by_connects(RegionConnections a, RegionConnections b) {
    if (a._connections == b._connections)
      return 0;
    else if (a._connections < b._connections)
      return -1;
    else return 1;
  }

  RegionConnections* _region_connects;  // lazily-allocated scratch cache
  size_t _region_connects_size;         // current capacity of _region_connects

  size_t _bytes_allocated_start_CM;   // allocation counter at mark start
  size_t _bytes_allocated_during_CM;  // allocations attributed to the mark window

  // Streak counters; a success resets the cancelled streak and vice versa.
  uint _cancelled_cm_cycles_in_a_row;
  uint _successful_cm_cycles_in_a_row;

  uint _cancelled_uprefs_cycles_in_a_row;
  uint _successful_uprefs_cycles_in_a_row;

  // Total used bytes of the regions in the current collection set, maintained
  // by subclasses in choose_collection_set_from_regiondata().
  size_t _bytes_in_cset;

  // Timestamp (os::elapsedTime()) of the last cycle end; used for
  // periodic-GC triggers.
  double _last_cycle_end;

public:

  ShenandoahHeuristics();
  virtual ~ShenandoahHeuristics();

  void record_bytes_allocated(size_t bytes);
  void record_bytes_reclaimed(size_t bytes);
  void record_bytes_start_CM(size_t bytes);
  void record_bytes_end_CM(size_t bytes);

  // Bracket a GC pause/cycle: snapshot allocation sequence numbers (and
  // heap usage at the end) on the heap object.
  void record_gc_start() {
    ShenandoahHeap::heap()->set_alloc_seq_gc_start();
  }

  void record_gc_end() {
    ShenandoahHeap::heap()->set_alloc_seq_gc_end();
    ShenandoahHeap::heap()->set_used_at_last_gc();
  }

  virtual void record_cycle_start() {
    // Do nothing
  }

  virtual void record_cycle_end() {
    _last_cycle_end = os::elapsedTime();
  }

  // Hook for heuristics that react to measured phase durations.
  virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
    // Do nothing
  }

  size_t bytes_in_cset() const { return _bytes_in_cset; }

  virtual void print_thresholds() {
  }

  // Policy decision: should a concurrent mark cycle start now?
  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const=0;

  virtual bool should_start_update_refs() {
    return _update_refs_early;
  }

  virtual bool update_refs() const {
    return _update_refs_early;
  }

  virtual bool should_start_partial_gc() {
    return false;
  }

  virtual bool can_do_partial_gc() {
    return false;
  }

  // After a cancelled cycle, decide whether to degrade in place rather than
  // fall back to full GC; allowed while the cancelled streak stays within
  // ShenandoahFullGCThreshold.
  virtual bool handover_cancelled_marking() {
    return _cancelled_cm_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }

  virtual bool handover_cancelled_uprefs() {
    return _cancelled_uprefs_cycles_in_a_row <= ShenandoahFullGCThreshold;
  }

  virtual void record_cm_cancelled() {
    _cancelled_cm_cycles_in_a_row++;
    _successful_cm_cycles_in_a_row = 0;
  }

  virtual void record_cm_success() {
    _cancelled_cm_cycles_in_a_row = 0;
    _successful_cm_cycles_in_a_row++;
  }

  virtual void record_uprefs_cancelled() {
    _cancelled_uprefs_cycles_in_a_row++;
    _successful_uprefs_cycles_in_a_row = 0;
  }

  virtual void record_uprefs_success() {
    _cancelled_uprefs_cycles_in_a_row = 0;
    _successful_uprefs_cycles_in_a_row++;
  }

  virtual void record_allocation_failure_gc() {
    _bytes_in_cset = 0;
  }

  virtual void record_user_requested_gc() {
    _bytes_in_cset = 0;
  }

  virtual void record_peak_occupancy() {
  }

  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set);
  virtual void choose_free_set(ShenandoahFreeSet* free_set);

  virtual bool process_references() {
    if (ShenandoahRefProcFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Process references every Nth GC cycle.
    return cycle % ShenandoahRefProcFrequency == 0;
  }

  virtual bool unload_classes() {
    if (ShenandoahUnloadClassesFrequency == 0) return false;
    size_t cycle = ShenandoahHeap::heap()->shenandoahPolicy()->cycle_counter();
    // Unload classes every Nth GC cycle.
    // This should not happen in the same cycle as process_references to amortize costs.
    // Offsetting by one is enough to break the rendezvous when periods are equal.
    // When periods are not equal, offsetting by one is just as good as any other guess.
    return (cycle + 1) % ShenandoahUnloadClassesFrequency == 0;
  }

  // Adds 'hr' to 'cs' if eligible; returns true when the region was added.
  bool maybe_add_heap_region(ShenandoahHeapRegion* hr,
                        ShenandoahCollectionSet* cs);

  virtual const char* name() = 0;
  virtual bool is_diagnostic() = 0;
  virtual bool is_experimental() = 0;
  virtual void initialize() {}

protected:
  // Subclass-specific cset selection over the pre-filtered candidates.
  // 'trash' is immediately reclaimable garbage, 'free' is free space in bytes.
  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* set,
                                                     RegionData* data, size_t data_size,
                                                     size_t trash, size_t free) = 0;
};
 257 
 258 ShenandoahHeuristics::ShenandoahHeuristics() :
 259   _bytes_allocated_since_CM(0),
 260   _bytes_reclaimed_this_cycle(0),
 261   _bytes_allocated_start_CM(0),
 262   _bytes_allocated_during_CM(0),
 263   _bytes_in_cset(0),
 264   _cancelled_cm_cycles_in_a_row(0),
 265   _successful_cm_cycles_in_a_row(0),
 266   _cancelled_uprefs_cycles_in_a_row(0),
 267   _successful_uprefs_cycles_in_a_row(0),
 268   _region_data(NULL),
 269   _region_data_size(0),
 270   _region_connects(NULL),
 271   _region_connects_size(0),
 272   _update_refs_early(false),
 273   _update_refs_adaptive(false),
 274   _last_cycle_end(0)
 275 {
 276   if (strcmp(ShenandoahUpdateRefsEarly, "on") == 0 ||
 277       strcmp(ShenandoahUpdateRefsEarly, "true") == 0 ) {
 278     _update_refs_early = true;
 279   } else if (strcmp(ShenandoahUpdateRefsEarly, "off") == 0 ||
 280              strcmp(ShenandoahUpdateRefsEarly, "false") == 0 ) {
 281     _update_refs_early = false;
 282   } else if (strcmp(ShenandoahUpdateRefsEarly, "adaptive") == 0) {
 283     _update_refs_adaptive = true;
 284     _update_refs_early = true;
 285   } else {
 286     vm_exit_during_initialization("Unknown -XX:ShenandoahUpdateRefsEarly option: %s", ShenandoahUpdateRefsEarly);
 287   }
 288 }
 289 
 290 ShenandoahHeuristics::~ShenandoahHeuristics() {
 291   if (_region_data != NULL) {
 292     FREE_C_HEAP_ARRAY(RegionGarbage, _region_data);
 293   }
 294   if (_region_connects != NULL) {
 295     FREE_C_HEAP_ARRAY(RegionConnections, _region_connects);
 296   }
 297 }
 298 
 299 bool ShenandoahHeuristics::maybe_add_heap_region(ShenandoahHeapRegion* hr,
 300                                                  ShenandoahCollectionSet* collection_set) {
 301     if (hr->is_regular() && hr->has_live() && !collection_set->is_in(hr)) {
 302       collection_set->add_region(hr);
 303       return true;
 304     }
 305     return false;
 306 }
 307 
// Builds the collection set in three steps: (1) scan all active regions,
// reclaiming fully-dead regions immediately and collecting the rest as
// candidates; (2) let the concrete heuristics pick from the candidates;
// (3) drop the cset entirely if immediate reclamation alone was good enough.
void ShenandoahHeuristics::choose_collection_set(ShenandoahCollectionSet* collection_set) {
  assert(collection_set->count() == 0, "Must be empty");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Poll this before populating collection set.
  size_t total_garbage = heap->garbage();

  // Step 1. Build up the region candidates we care about, rejecting losers and accepting winners right away.

  ShenandoahHeapRegionSet* regions = heap->regions();
  size_t active = regions->active_regions();
  // NOTE(review): the free-percentage log below divides by 'active';
  // presumably at least one region is always active — confirm.

  RegionData* candidates = get_region_data_cache(active);

  size_t cand_idx = 0;

  size_t immediate_garbage = 0;
  size_t immediate_regions = 0;

  size_t free = 0;
  size_t free_regions = 0;

  for (size_t i = 0; i < active; i++) {
    ShenandoahHeapRegion* region = regions->get(i);

    if (region->is_empty()) {
      free_regions++;
      free += ShenandoahHeapRegion::region_size_bytes();
    } else if (region->is_regular()) {
      if (!region->has_live()) {
        // We can recycle it right away and put it in the free set.
        immediate_regions++;
        immediate_garbage += region->garbage();
        region->make_trash();
      } else {
        // This is our candidate for later consideration.
        candidates[cand_idx]._region = region;
        candidates[cand_idx]._garbage = region->garbage();
        cand_idx++;
      }
    } else if (region->is_humongous_start()) {
        // Reclaim humongous regions here, and count them as the immediate garbage
#ifdef ASSERT
        bool reg_live = region->has_live();
        bool bm_live = heap->is_marked_complete(oop(region->bottom() + BrooksPointer::word_size()));
        assert(reg_live == bm_live,
               "Humongous liveness and marks should agree. Region live: %s; Bitmap live: %s; Region Live Words: " SIZE_FORMAT,
               BOOL_TO_STR(reg_live), BOOL_TO_STR(bm_live), region->get_live_data_words());
#endif
        if (!region->has_live()) {
          // Trashes the whole humongous object run; returns region count.
          size_t reclaimed = heap->trash_humongous_region_at(region);
          immediate_regions += reclaimed;
          immediate_garbage += reclaimed * ShenandoahHeapRegion::region_size_bytes();
        }
      }
  }

  // Step 2. Process the remaining candidates, if any.

  if (cand_idx > 0) {
    choose_collection_set_from_regiondata(collection_set, candidates, cand_idx, immediate_garbage, free);
  }

  // Step 3. Look back at collection set, and see if it's worth it to collect,
  // given the amount of immediately reclaimable garbage.

  log_info(gc, ergo)("Total Garbage: "SIZE_FORMAT"M",
                     total_garbage / M);

  size_t total_garbage_regions = immediate_regions + collection_set->count();
  size_t immediate_percent = total_garbage_regions == 0 ? 0 : (immediate_regions * 100 / total_garbage_regions);

  log_info(gc, ergo)("Immediate Garbage: "SIZE_FORMAT"M, "SIZE_FORMAT" regions ("SIZE_FORMAT"%% of total)",
                     immediate_garbage / M, immediate_regions, immediate_percent);

  if (immediate_percent > ShenandoahImmediateThreshold) {
    // Immediate reclamation alone freed enough; skip evacuation this cycle.
    collection_set->clear();
  } else {
    log_info(gc, ergo)("Garbage to be collected: "SIZE_FORMAT"M ("SIZE_FORMAT"%% of total), "SIZE_FORMAT" regions",
                       collection_set->garbage() / M, collection_set->garbage() * 100 / MAX2(total_garbage, (size_t)1), collection_set->count());
    log_info(gc, ergo)("Live objects to be evacuated: "SIZE_FORMAT"M",
                       collection_set->live_data() / M);
    log_info(gc, ergo)("Live/garbage ratio in collected regions: "SIZE_FORMAT"%%",
                       collection_set->live_data() * 100 / MAX2(collection_set->garbage(), (size_t)1));
    log_info(gc, ergo)("Free: "SIZE_FORMAT"M, "SIZE_FORMAT" regions ("SIZE_FORMAT"%% of total)",
                       free / M, free_regions, free_regions * 100 / active);
  }

  collection_set->update_region_status();
}
 399 
 400 void ShenandoahHeuristics::choose_free_set(ShenandoahFreeSet* free_set) {
 401   ShenandoahHeapRegionSet* ordered_regions = ShenandoahHeap::heap()->regions();
 402   for (size_t i = 0; i < ordered_regions->active_regions(); i++) {
 403     ShenandoahHeapRegion* region = ordered_regions->get(i);
 404     if (region->is_alloc_allowed()) {
 405       free_set->add_region(region);
 406     }
 407   }
 408 }
 409 
 410 
// Delegates GC-start bookkeeping to the active heuristics.
void ShenandoahCollectorPolicy::record_gc_start() {
  _heuristics->record_gc_start();
}
 414 
// Delegates GC-end bookkeeping to the active heuristics.
void ShenandoahCollectorPolicy::record_gc_end() {
  _heuristics->record_gc_end();
}
 418 
// Intentionally a no-op in this implementation.
void ShenandoahCollectorPolicy::report_concgc_cancelled() {
}
 421 
// Records the current allocation counter and feeds the allocation-rate history.
// NOTE(review): this also overwrites _bytes_allocated_start_CM, which is
// otherwise set by record_bytes_start_CM(); presumably intended to
// re-baseline the CM window on every sample — confirm.
void ShenandoahHeuristics::record_bytes_allocated(size_t bytes) {
  _bytes_allocated_since_CM = bytes;
  _bytes_allocated_start_CM = bytes;
  _allocation_rate_bytes.add(bytes);
}
 427 
// Records the reclaimed volume for this cycle and feeds the rate history.
void ShenandoahHeuristics::record_bytes_reclaimed(size_t bytes) {
  _bytes_reclaimed_this_cycle = bytes;
  _reclamation_rate_bytes.add(bytes);
}
 432 
// Snapshots the allocation counter at concurrent-mark start; paired with
// record_bytes_end_CM() to attribute allocations to the marking window.
void ShenandoahHeuristics::record_bytes_start_CM(size_t bytes) {
  _bytes_allocated_start_CM = bytes;
}
 436 
 437 void ShenandoahHeuristics::record_bytes_end_CM(size_t bytes) {
 438   _bytes_allocated_during_CM = (bytes > _bytes_allocated_start_CM) ? (bytes - _bytes_allocated_start_CM)
 439                                                                    : bytes;
 440 }
 441 
 442 class ShenandoahPassiveHeuristics : public ShenandoahHeuristics {
 443 public:
 444   ShenandoahPassiveHeuristics() : ShenandoahHeuristics() {
 445     // Do not allow concurrent cycles.
 446     FLAG_SET_DEFAULT(ExplicitGCInvokesConcurrent, false);
 447   }
 448 
 449   virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
 450                                                      RegionData* data, size_t size,
 451                                                      size_t trash, size_t free) {
 452     for (size_t idx = 0; idx < size; idx++) {
 453       ShenandoahHeapRegion* r = data[idx]._region;
 454       if (r->garbage() > 0) {
 455         cset->add_region(r);
 456       }
 457     }
 458   }
 459 
 460   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 461     // Never do concurrent GCs.
 462     return false;
 463   }
 464 
 465   virtual bool process_references() {
 466     if (ShenandoahRefProcFrequency == 0) return false;
 467     // Always process references.
 468     return true;
 469   }
 470 
 471   virtual bool unload_classes() {
 472     if (ShenandoahUnloadClassesFrequency == 0) return false;
 473     // Always unload classes.
 474     return true;
 475   }
 476 
 477   virtual const char* name() {
 478     return "passive";
 479   }
 480 
 481   virtual bool is_diagnostic() {
 482     return true;
 483   }
 484 
 485   virtual bool is_experimental() {
 486     return false;
 487   }
 488 };
 489 
 490 class ShenandoahAggressiveHeuristics : public ShenandoahHeuristics {
 491 public:
 492   ShenandoahAggressiveHeuristics() : ShenandoahHeuristics() {
 493     // Do not shortcut evacuation
 494     if (FLAG_IS_DEFAULT(ShenandoahImmediateThreshold)) {
 495       FLAG_SET_DEFAULT(ShenandoahImmediateThreshold, 100);
 496     }
 497   }
 498 
 499   virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
 500                                                      RegionData* data, size_t size,
 501                                                      size_t trash, size_t free) {
 502     for (size_t idx = 0; idx < size; idx++) {
 503       ShenandoahHeapRegion* r = data[idx]._region;
 504       if (r->garbage() > 0) {
 505         cset->add_region(r);
 506       }
 507     }
 508   }
 509 
 510   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 511     return true;
 512   }
 513 
 514   virtual bool process_references() {
 515     if (ShenandoahRefProcFrequency == 0) return false;
 516     // Randomly process refs with 50% chance.
 517     return (os::random() & 1) == 1;
 518   }
 519 
 520   virtual bool unload_classes() {
 521     if (ShenandoahUnloadClassesFrequency == 0) return false;
 522     // Randomly unload classes with 50% chance.
 523     return (os::random() & 1) == 1;
 524   }
 525 
 526   virtual const char* name() {
 527     return "aggressive";
 528   }
 529 
 530   virtual bool is_diagnostic() {
 531     return true;
 532   }
 533 
 534   virtual bool is_experimental() {
 535     return false;
 536   }
 537 };
 538 
// "Dynamic" heuristics: starts concurrent mark when free space drops below a
// static threshold and enough allocation happened since the last cycle;
// selects cset regions purely by a per-region garbage threshold.
class ShenandoahDynamicHeuristics : public ShenandoahHeuristics {
public:
  ShenandoahDynamicHeuristics() : ShenandoahHeuristics() {
  }

  // Logs the configured trigger thresholds at initialization.
  void print_thresholds() {
    log_info(gc, init)("Shenandoah heuristics thresholds: allocation "SIZE_FORMAT", free "SIZE_FORMAT", garbage "SIZE_FORMAT,
                       ShenandoahAllocationThreshold,
                       ShenandoahFreeThreshold,
                       ShenandoahGarbageThreshold);
  }

  virtual ~ShenandoahDynamicHeuristics() {}

  virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {

    bool shouldStartConcurrentMark = false;

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t available = heap->free_regions()->available();

    if (! update_refs()) {
      // Count in the memory available after cset reclamation.
      size_t cset = MIN2(_bytes_in_cset, (ShenandoahCSetThreshold * capacity) / 100);
      available += cset;
    }

    // Trigger point: free space below (free + cset) percent of capacity.
    uintx threshold = ShenandoahFreeThreshold + ShenandoahCSetThreshold;
    size_t targetStartMarking = (capacity * threshold) / 100;

    // Fallback trigger: guarantee a GC at least once per
    // ShenandoahGuaranteedGCInterval milliseconds.
    double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
    bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);

    size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
    if (available < targetStartMarking &&
        heap->bytes_allocated_since_cm() > threshold_bytes_allocated) {
      // Need to check that an appropriate number of regions have
      // been allocated since last concurrent mark too.
      shouldStartConcurrentMark = true;
    } else if (periodic_gc) {
      log_info(gc,ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
                        last_time_ms, ShenandoahGuaranteedGCInterval);
      shouldStartConcurrentMark = true;
    }

    return shouldStartConcurrentMark;
  }

  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t trash, size_t free) {
    // A region qualifies once its garbage exceeds the configured percentage
    // of the region size.
    size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;

    // Track the total footprint of chosen regions; consulted by
    // should_start_concurrent_mark() when update-refs is off.
    _bytes_in_cset = 0;
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;
      if (r->garbage() > threshold) {
        cset->add_region(r);
        _bytes_in_cset += r->used();
      }
    }
  }

  virtual const char* name() {
    return "dynamic";
  }

  virtual bool is_diagnostic() {
    return false;
  }

  virtual bool is_experimental() {
    return false;
  }
};
 614 
 615 class ShenandoahContinuousHeuristics : public ShenandoahHeuristics {
 616 public:
 617   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 618     // Start the cycle, unless completely idle.
 619     return ShenandoahHeap::heap()->bytes_allocated_since_cm() > 0;
 620   }
 621 
 622   virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
 623                                                      RegionData* data, size_t size,
 624                                                      size_t trash, size_t free) {
 625     size_t threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;
 626     for (size_t idx = 0; idx < size; idx++) {
 627       ShenandoahHeapRegion* r = data[idx]._region;
 628       if (r->garbage() > threshold) {
 629         cset->add_region(r);
 630       }
 631     }
 632   }
 633 
 634   virtual const char* name() {
 635     return "continuous";
 636   }
 637 
 638   virtual bool is_diagnostic() {
 639     return false;
 640   }
 641 
 642   virtual bool is_experimental() {
 643     return false;
 644   }
 645 };
 646 
 647 class ShenandoahAdaptiveHeuristics : public ShenandoahHeuristics {
 648 private:
 649   uintx _free_threshold;
 650   TruncatedSeq* _cset_history;
 651   size_t _peak_occupancy;
 652   TruncatedSeq* _cycle_gap_history;
 653   TruncatedSeq* _conc_mark_duration_history;
 654   TruncatedSeq* _conc_uprefs_duration_history;
 655 public:
 656   ShenandoahAdaptiveHeuristics() :
 657     ShenandoahHeuristics(),
 658     _free_threshold(ShenandoahInitFreeThreshold),
 659     _peak_occupancy(0),
 660     _conc_mark_duration_history(new TruncatedSeq(5)),
 661     _conc_uprefs_duration_history(new TruncatedSeq(5)),
 662     _cycle_gap_history(new TruncatedSeq(5)),
 663     _cset_history(new TruncatedSeq((uint)ShenandoahHappyCyclesThreshold)) {
 664 
 665     _cset_history->add((double) ShenandoahCSetThreshold);
 666     _cset_history->add((double) ShenandoahCSetThreshold);
 667   }
 668 
 669   virtual ~ShenandoahAdaptiveHeuristics() {
 670     delete _cset_history;
 671   }
 672 
  virtual void choose_collection_set_from_regiondata(ShenandoahCollectionSet* cset,
                                                     RegionData* data, size_t size,
                                                     size_t trash, size_t free) {
    size_t garbage_threshold = ShenandoahHeapRegion::region_size_bytes() * ShenandoahGarbageThreshold / 100;

    // The logic for cset selection in adaptive is as follows:
    //
    //   1. We cannot get cset larger than available free space. Otherwise we guarantee OOME
    //      during evacuation, and thus guarantee full GC. In practice, we also want to let
    //      application to allocate something. This is why we limit CSet to some fraction of
    //      available space. In non-overloaded heap, max_cset would contain all plausible candidates
    //      over garbage threshold.
    //
    //   2. We should not get cset too low so that free threshold would not be met right
    //      after the cycle. Otherwise we get back-to-back cycles for no reason if heap is
    //      too fragmented. In non-overloaded non-fragmented heap min_cset would be around zero.
    //
    // Therefore, we start by sorting the regions by garbage. Then we unconditionally add the best candidates
    // before we meet min_cset. Then we add all candidates that fit with a garbage threshold before
    // we hit max_cset. When max_cset is hit, we terminate the cset selection. Note that in this scheme,
    // ShenandoahGarbageThreshold is the soft threshold which would be ignored until min_cset is hit.

    // Free target allows MaxNormalStep percent of slack over the current
    // threshold, capped at 100% of capacity.
    size_t free_target = MIN2<size_t>(_free_threshold + MaxNormalStep, 100) * ShenandoahHeap::heap()->capacity() / 100;
    size_t actual_free = free + trash;
    size_t min_cset = free_target > actual_free ? (free_target - actual_free) : 0;
    size_t max_cset = actual_free * 3 / 4;
    min_cset = MIN2(min_cset, max_cset);

    log_info(gc, ergo)("Adaptive CSet selection: free target = " SIZE_FORMAT "M, actual free = "
                               SIZE_FORMAT "M; min cset = " SIZE_FORMAT "M, max cset = " SIZE_FORMAT "M",
                       free_target / M, actual_free / M, min_cset / M, max_cset / M);

    // Better select garbage-first regions
    QuickSort::sort<RegionData>(data, (int)size, compare_by_garbage, false);

    // live_cset tracks projected live bytes to evacuate (bounded by min/max
    // cset); _bytes_in_cset tracks the regions' total footprint.
    size_t live_cset = 0;
    _bytes_in_cset = 0;
    for (size_t idx = 0; idx < size; idx++) {
      ShenandoahHeapRegion* r = data[idx]._region;

      size_t new_cset = live_cset + r->get_live_data_bytes();

      if (new_cset < min_cset) {
        // Below min_cset: take unconditionally, ignoring the garbage threshold.
        cset->add_region(r);
        _bytes_in_cset += r->used();
        live_cset = new_cset;
      } else if (new_cset <= max_cset) {
        // Between min and max: take only if garbage is over the soft threshold.
        if (r->garbage() > garbage_threshold) {
          cset->add_region(r);
          _bytes_in_cset += r->used();
          live_cset = new_cset;
        }
      } else {
        // Past max_cset: stop — candidates are sorted, the rest is worse.
        break;
      }
    }
  }
 730 
  // Step sizes (in percent of heap) used by adjust_free_threshold().
  static const intx MaxNormalStep = 5;      // max step towards goal under normal conditions
  static const intx CancelledGC_Hit = 10;   // how much to step on cancelled GC
  static const intx AllocFailure_Hit = 20;  // how much to step on allocation failure full GC
  static const intx UserRequested_Hit = 0;  // how much to step on user requested full GC
 735 
  // After a completed cycle, steer _free_threshold towards the point where
  // the observed lowest free percentage matches ShenandoahMinFreeThreshold:
  // pessimize immediately, optimize only after a streak of happy cycles.
  void handle_cycle_success() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    size_t capacity = heap->capacity();

    // Percent of heap that stayed free at the observed peak occupancy.
    size_t current_threshold = (capacity - _peak_occupancy) * 100 / capacity;
    size_t min_threshold = ShenandoahMinFreeThreshold;
    // Positive step: free space dipped below the minimum — pessimize.
    // Negative step: we had slack — candidate for optimizing.
    intx step = min_threshold - current_threshold;
    step = MAX2(step, (intx) -MaxNormalStep);
    step = MIN2(step, (intx) MaxNormalStep);

    log_info(gc, ergo)("Capacity: " SIZE_FORMAT "M, Peak Occupancy: " SIZE_FORMAT
                              "M, Lowest Free: " SIZE_FORMAT "M, Free Threshold: " UINTX_FORMAT "M",
                       capacity / M, _peak_occupancy / M,
                       (capacity - _peak_occupancy) / M, ShenandoahMinFreeThreshold * capacity / 100 / M);

    if (step > 0) {
      // Pessimize
      adjust_free_threshold(step);
    } else if (step < 0) {
      // Optimize, if enough happy cycles happened
      if (_successful_cm_cycles_in_a_row > ShenandoahHappyCyclesThreshold &&
          (! update_refs() || (_successful_uprefs_cycles_in_a_row > ShenandoahHappyCyclesThreshold)) &&
          _free_threshold > 0) {
        adjust_free_threshold(step);
        _successful_cm_cycles_in_a_row = 0;
        _successful_uprefs_cycles_in_a_row = 0;
      }
    } else {
      // do nothing
    }
    // Start a fresh peak-occupancy observation window.
    _peak_occupancy = 0;
  }
 768 
  // After a completed cycle, re-evaluate the free threshold.
  void record_cycle_end() {
    ShenandoahHeuristics::record_cycle_end();
    handle_cycle_success();
  }

  // Records the idle gap between the previous cycle's end and this start.
  void record_cycle_start() {
    ShenandoahHeuristics::record_cycle_start();
    double last_cycle_gap = (os::elapsedTime() - _last_cycle_end);
    _cycle_gap_history->add(last_cycle_gap);
  }
 779 
 780   virtual void record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
 781     if (phase == ShenandoahPhaseTimings::conc_mark) {
 782       _conc_mark_duration_history->add(secs);
 783     } else if (phase == ShenandoahPhaseTimings::conc_update_refs) {
 784       _conc_uprefs_duration_history->add(secs);
 785     } // Else ignore
 786   }
 787 
 788   void adjust_free_threshold(intx adj) {
 789     intx new_value = adj + _free_threshold;
 790     uintx new_threshold = (uintx)MAX2<intx>(new_value, 0);
 791     new_threshold = MAX2(new_threshold, ShenandoahMinFreeThreshold);
 792     new_threshold = MIN2(new_threshold, ShenandoahMaxFreeThreshold);
 793     if (new_threshold != _free_threshold) {
 794       _free_threshold = new_threshold;
 795       log_info(gc,ergo)("Adjusting free threshold to: " UINTX_FORMAT "%% (" SIZE_FORMAT "M)",
 796                         _free_threshold, _free_threshold * ShenandoahHeap::heap()->capacity() / 100 / M);
 797     }
 798   }
 799 
 800   virtual void record_cm_cancelled() {
 801     ShenandoahHeuristics::record_cm_cancelled();
 802     adjust_free_threshold(CancelledGC_Hit);
 803   }
 804 
 805   virtual void record_cm_success() {
 806     ShenandoahHeuristics::record_cm_success();
 807   }
 808 
 809   virtual void record_uprefs_cancelled() {
 810     ShenandoahHeuristics::record_uprefs_cancelled();
 811     adjust_free_threshold(CancelledGC_Hit);
 812   }
 813 
 814   virtual void record_uprefs_success() {
 815     ShenandoahHeuristics::record_uprefs_success();
 816   }
 817 
  // Explicit (user-requested) GC: adjust the free threshold by the
  // user-requested step.
  virtual void record_user_requested_gc() {
    ShenandoahHeuristics::record_user_requested_gc();
    adjust_free_threshold(UserRequested_Hit);
  }
 822 
  // Allocation failure forced a GC: adjust the free threshold by the
  // allocation-failure step.
  virtual void record_allocation_failure_gc() {
    ShenandoahHeuristics::record_allocation_failure_gc();
    adjust_free_threshold(AllocFailure_Hit);
  }
 827 
  // Track the maximum heap occupancy seen so far.
  virtual void record_peak_occupancy() {
    _peak_occupancy = MAX2(_peak_occupancy, ShenandoahHeap::heap()->used());
  }
 831 
 832   virtual bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 833     bool shouldStartConcurrentMark = false;
 834     ShenandoahHeap* heap = ShenandoahHeap::heap();
 835     size_t available = heap->free_regions()->available();
 836     uintx factor = _free_threshold;
 837     size_t cset_threshold = 0;
 838     if (! update_refs()) {
 839       // Count in the memory available after cset reclamation.
 840       cset_threshold = (size_t) _cset_history->davg();
 841       size_t cset = MIN2(_bytes_in_cset, (cset_threshold * capacity) / 100);
 842       available += cset;
 843       factor += cset_threshold;
 844     }
 845 
 846     double last_time_ms = (os::elapsedTime() - _last_cycle_end) * 1000;
 847     bool periodic_gc = (last_time_ms > ShenandoahGuaranteedGCInterval);
 848     size_t threshold_available = (capacity * factor) / 100;
 849     size_t bytes_allocated = heap->bytes_allocated_since_cm();
 850     size_t threshold_bytes_allocated = heap->capacity() * ShenandoahAllocationThreshold / 100;
 851 
 852     if (available < threshold_available &&
 853             bytes_allocated > threshold_bytes_allocated) {
 854       log_info(gc,ergo)("Concurrent marking triggered. Free: " SIZE_FORMAT "M, Free Threshold: " SIZE_FORMAT
 855                                 "M; Allocated: " SIZE_FORMAT "M, Alloc Threshold: " SIZE_FORMAT "M",
 856                         available / M, threshold_available / M, available / M, threshold_bytes_allocated / M);
 857       // Need to check that an appropriate number of regions have
 858       // been allocated since last concurrent mark too.
 859       shouldStartConcurrentMark = true;
 860     } else if (periodic_gc) {
 861       log_info(gc,ergo)("Periodic GC triggered. Time since last GC: %.0f ms, Guaranteed Interval: " UINTX_FORMAT " ms",
 862           last_time_ms, ShenandoahGuaranteedGCInterval);
 863       shouldStartConcurrentMark = true;
 864     }
 865 
 866     if (shouldStartConcurrentMark) {
 867       if (! update_refs()) {
 868         log_info(gc,ergo)("Predicted cset threshold: " SIZE_FORMAT ", " SIZE_FORMAT "K CSet ("SIZE_FORMAT"%%)",
 869                           cset_threshold, _bytes_in_cset / K, _bytes_in_cset * 100 / capacity);
 870         _cset_history->add((double) (_bytes_in_cset * 100 / capacity));
 871       }
 872     }
 873     return shouldStartConcurrentMark;
 874   }
 875 
 876   virtual bool should_start_update_refs() {
 877     if (! _update_refs_adaptive) {
 878       return _update_refs_early;
 879     }
 880 
 881     double cycle_gap_avg = _cycle_gap_history->avg();
 882     double conc_mark_avg = _conc_mark_duration_history->avg();
 883     double conc_uprefs_avg = _conc_uprefs_duration_history->avg();
 884 
 885     if (_update_refs_early) {
 886       double threshold = ShenandoahMergeUpdateRefsMinGap / 100.0;
 887       if (conc_mark_avg + conc_uprefs_avg > cycle_gap_avg * threshold) {
 888         _update_refs_early = false;
 889       }
 890     } else {
 891       double threshold = ShenandoahMergeUpdateRefsMaxGap / 100.0;
 892       if (conc_mark_avg + conc_uprefs_avg < cycle_gap_avg * threshold) {
 893         _update_refs_early = true;
 894       }
 895     }
 896     return _update_refs_early;
 897   }
 898 
  // Name matched by -XX:ShenandoahGCHeuristics=adaptive.
  virtual const char* name() {
    return "adaptive";
  }
 902 
  // Not diagnostic: usable without -XX:+UnlockDiagnosticVMOptions.
  virtual bool is_diagnostic() {
    return false;
  }
 906 
  // Not experimental: usable without -XX:+UnlockExperimentalVMOptions.
  virtual bool is_experimental() {
    return false;
  }
 910 };
 911 
 912 class ShenandoahPartialHeuristics : public ShenandoahAdaptiveHeuristics {
 913 protected:
 914   size_t* _from_idxs;
 915 
 916 public:
 917   ShenandoahPartialHeuristics() : ShenandoahAdaptiveHeuristics() {
 918     FLAG_SET_DEFAULT(UseShenandoahMatrix, true);
 919     if (FLAG_IS_DEFAULT(ShenandoahRefProcFrequency)) {
 920        FLAG_SET_DEFAULT(ShenandoahRefProcFrequency,1);
 921     }
 922 
 923     // TODO: Disable this optimization for now, as it also requires the matrix barriers.
 924 #ifdef COMPILER2
 925     FLAG_SET_DEFAULT(ArrayCopyLoadStoreMaxElem, 0);
 926 #endif
 927   }
 928 
 929   void initialize() {
 930     _from_idxs = NEW_C_HEAP_ARRAY(size_t, ShenandoahHeap::heap()->num_regions(), mtGC);
 931   }
 932 
 933   virtual ~ShenandoahPartialHeuristics() {
 934     FREE_C_HEAP_ARRAY(size_t, _from_idxs);
 935   }
 936 
 937   bool should_start_update_refs() {
 938     return true;
 939   }
 940 
 941   bool update_refs() const {
 942     return true;
 943   }
 944 
 945   bool can_do_partial_gc() {
 946     return true;
 947   }
 948 
 949   bool should_start_concurrent_mark(size_t used, size_t capacity) const {
 950     return false;
 951   }
 952 
 953   virtual bool is_diagnostic() {
 954     return false;
 955   }
 956 
 957   virtual bool is_experimental() {
 958     return true;
 959   }
 960 
 961   virtual bool should_start_partial_gc() = 0;
 962   virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) = 0;
 963 };
 964 
// Partial-GC heuristics that select regions with few inbound connections
// (per the connection matrix), so the root set to scan stays small.
class ShenandoahPartialConnectedHeuristics : public ShenandoahPartialHeuristics {
public:
  virtual const char* name() {
    return "connectedness";
  }

  // Trigger a partial cycle once allocations since the last GC exceed
  // ShenandoahConnectednessPercentage of the active heap.
  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->need_update_refs()) {
      // Cannot start partial if heap is not completely updated.
      return false;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return false;
    }

    size_t active    = heap->regions()->active_regions() * ShenandoahHeapRegion::region_size_bytes();
    size_t threshold = active * ShenandoahConnectednessPercentage / 100;
    size_t allocated = used - prev_used;
    bool result = allocated > threshold;

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
                       result ? "Partial GC triggered" : "Partial GC skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M);

    // Log at info only when we actually trigger; otherwise keep it at trace.
    if (result) {
      log_info(gc,ergo)("%s", msg.buffer());
    } else {
      log_trace(gc,ergo)("%s", msg.buffer());
    }
    return result;
  }

  // Build the collection set: take regular regions with the fewest inbound
  // connections, and mark their inbound regions as scan roots.
  // RegionConnections / get_region_connects_cache / compare_by_connects are
  // defined elsewhere in this file (outside this view).
  void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    ShenandoahHeapRegionSet* regions = heap->regions();
    size_t num_regions = heap->num_regions();

    RegionConnections* connects = get_region_connects_cache(num_regions);
    size_t connect_cnt = 0;

    // Gather candidates: regular regions under the inbound threshold.
    // Root flags are cleared for all regions as we go.
    for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
      ShenandoahHeapRegion* region = regions->get(to_idx);
      region->set_root(false);
      if (!region->is_regular()) continue;

      uint count = matrix->count_connected_to(to_idx, num_regions);
      if (count < ShenandoahPartialInboundThreshold) {
        connects[connect_cnt]._region = region;
        connects[connect_cnt]._connections = count;
        connect_cnt++;
      }
    }

    // Sort so the least-connected regions come first (see the monotonicity
    // assert below; comparator is defined outside this view).
    QuickSort::sort<RegionConnections>(connects, (int)connect_cnt, compare_by_connects, false);

    // Heuristics triggered partial when allocated was larger than a threshold.
    // New allocations might have happened while we were preparing for GC,
    // capture all them in this cycle. This "adjusts" the threshold automatically.
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN3(ShenandoahHeapRegion::required_regions(used - prev_used), num_regions, connect_cnt);

    for (size_t c = 0; c < target; c++) {
      assert (c == 0 || connects[c]._connections >= connects[c-1]._connections, "monotonicity");

      ShenandoahHeapRegion* region = connects[c]._region;
      size_t to_idx = region->region_number();
      assert(region->is_regular(), "filtered before");
      assert(! heap->region_in_collection_set(to_idx), "must not be in cset yet");

      // Only accept the region if all inbound regions fit under the
      // threshold; those inbound regions become roots for the partial scan.
      size_t from_idx_count = 0;
      if (matrix->enumerate_connected_to(to_idx, num_regions,
                                         _from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        maybe_add_heap_region(region, collection_set);
        for (size_t i = 0; i < from_idx_count; i++) {
          ShenandoahHeapRegion* r = regions->get(_from_idxs[i]);
          if (!r->is_root()) {
            r->set_root(true);
          }
        }
      }
    }

    collection_set->update_region_status();
  }
};
1063 
1064 class ShenandoahGenerationalPartialHeuristics : public ShenandoahPartialHeuristics {
1065 public:
1066 
1067   ShenandoahGenerationalPartialHeuristics() : ShenandoahPartialHeuristics() {
1068      if (FLAG_IS_DEFAULT(ShenandoahPartialInboundThreshold)) {
1069        FLAG_SET_DEFAULT(ShenandoahPartialInboundThreshold, 100);
1070      }
1071   }
1072 
1073   virtual const char* name() {
1074     return "generational";
1075   }
1076 
1077   virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
1078     ShenandoahHeap* heap = ShenandoahHeap::heap();
1079     ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
1080     uint64_t alloc_seq_at_last_gc_end   = heap->alloc_seq_at_last_gc_end();
1081     uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start();
1082 
1083     ShenandoahHeapRegionSet* regions = heap->regions();
1084     size_t active = regions->active_regions();
1085     ShenandoahHeapRegionSet sorted_regions(active);
1086 
1087     for (size_t i = 0; i < active; i++) {
1088       sorted_regions.add_region(regions->get(i));
1089     }
1090 
1091     sorted_regions.sort(compare_by_alloc_seq_descending);
1092 
1093     // Heuristics triggered partial when allocated was larger than a threshold.
1094     // New allocations might have happened while we were preparing for GC,
1095     // capture all them in this cycle. This "adjusts" the threshold automatically.
1096     size_t used      = heap->used();
1097     size_t prev_used = heap->used_at_last_gc();
1098     guarantee(used >= prev_used, "Invariant");
1099     size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), sorted_regions.active_regions());
1100 
1101     for (uint to_idx = 0; to_idx < active; to_idx++) {
1102       ShenandoahHeapRegion* region = regions->get(to_idx);
1103       region->set_root(false);
1104     }
1105 
1106     uint count = 0;
1107     size_t sorted_active = sorted_regions.active_regions();
1108 
1109     for (uint i = 0; (i < sorted_active) && (count < target); i++) {
1110       ShenandoahHeapRegion* contender = sorted_regions.get(i);
1111       if (contender->last_alloc_seq_num() <= alloc_seq_at_last_gc_end) {
1112         break;
1113       }
1114 
1115       size_t index = contender->region_number();
1116       size_t num_regions = heap->num_regions();
1117       size_t from_idx_count = 0;
1118       if (matrix->enumerate_connected_to(index, num_regions, _from_idxs, from_idx_count,
1119                                          ShenandoahPartialInboundThreshold)) {
1120         if (maybe_add_heap_region(contender, collection_set)) {
1121           count++;
1122         }
1123 
1124         for (uint f = 0; f < from_idx_count; f++) {
1125           ShenandoahHeapRegion* r = regions->get(_from_idxs[f]);
1126           if (!r->is_root()) {
1127             r->set_root(true);
1128           }
1129         }
1130       }
1131     }
1132     collection_set->update_region_status();
1133 
1134     log_info(gc,ergo)("Regions: Active: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
1135                       active, target, ShenandoahGenerationalYoungGenPercentage, collection_set->count());
1136   }
1137 
1138   bool should_start_partial_gc() {
1139     ShenandoahHeap* heap = ShenandoahHeap::heap();
1140 
1141     if (heap->need_update_refs()) {
1142       // Cannot start partial if heap is not completely updated.
1143       return false;
1144     }
1145 
1146     size_t capacity  = heap->capacity();
1147     size_t used      = heap->used();
1148     size_t prev_used = heap->used_at_last_gc();
1149 
1150     if (used < prev_used) {
1151       // Major collection must have happened, "used" data is unreliable, wait for update.
1152       return false;
1153     }
1154 
1155     size_t active    = heap->regions()->active_regions() * ShenandoahHeapRegion::region_size_bytes();
1156     size_t threshold = active * ShenandoahGenerationalYoungGenPercentage / 100;
1157     size_t allocated = used - prev_used;
1158 
1159     // Start the next young gc after we've allocated percentage_young of the heap.
1160     bool result = allocated > threshold;
1161 
1162     FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
1163                        "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M",
1164                        result ? "Partial GC triggered" : "Partial GC skipped",
1165                        capacity/M, used/M, prev_used/M, allocated/M, threshold/M);
1166 
1167     if (result) {
1168       log_info(gc,ergo)("%s", msg.buffer());
1169     } else {
1170       log_trace(gc,ergo)("%s", msg.buffer());
1171     }
1172     return result;
1173   }
1174 };
1175 
// Partial-GC heuristics that collect the least-recently allocated regions
// (oldest first), using the connection matrix to find scan roots.
class ShenandoahLRUPartialHeuristics : public ShenandoahPartialHeuristics {
public:
  ShenandoahLRUPartialHeuristics() : ShenandoahPartialHeuristics() {
    if (FLAG_IS_DEFAULT(ShenandoahPartialInboundThreshold)) {
      FLAG_SET_DEFAULT(ShenandoahPartialInboundThreshold, 100);
    }
  }

  virtual const char* name() {
    return "LRU";
  }

  // Build the collection set from the oldest regions (allocated before the
  // last GC started), marking their inbound regions as scan roots.
  virtual void choose_collection_set(ShenandoahCollectionSet* collection_set) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConnectionMatrix* matrix = heap->connection_matrix();
    uint64_t alloc_seq_at_last_gc_start = heap->alloc_seq_at_last_gc_start();

    ShenandoahHeapRegionSet* regions = ShenandoahHeap::heap()->regions();
    size_t active = regions->active_regions();
    ShenandoahHeapRegionSet sorted_regions(active);

    // Only regular regions that have seen at least one allocation qualify.
    for (size_t i = 0; i < active; i++) {
      ShenandoahHeapRegion* r = regions->get(i);
      if (r->is_regular() && (r->last_alloc_seq_num() > 0)) {
        sorted_regions.add_region(regions->get(i));
      }
    }

    // Oldest (lowest alloc seq) regions first.
    sorted_regions.sort(compare_by_alloc_seq_ascending);

    // Heuristics triggered partial when allocated was larger than a threshold.
    // New allocations might have happened while we were preparing for GC,
    // capture all them in this cycle. This "adjusts" the threshold automatically.
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();
    guarantee(used >= prev_used, "Invariant");
    size_t target = MIN2(ShenandoahHeapRegion::required_regions(used - prev_used), sorted_regions.active_regions());

    // Clear root markers left over from the previous cycle.
    for (uint to_idx = 0; to_idx < active; to_idx++) {
      ShenandoahHeapRegion* region = regions->get(to_idx);
      region->set_root(false);
    }
    uint count = 0;
    size_t sorted_active = sorted_regions.active_regions();

    for (uint i = 0; (i < sorted_active) && (count < target); i++) {
      ShenandoahHeapRegion* contender = sorted_regions.get(i);
      // Ascending sort: once we reach a region allocated at/after the last
      // GC start, all remaining regions are newer -- stop.
      if (contender->last_alloc_seq_num() >= alloc_seq_at_last_gc_start) {
        break;
      }

      size_t index = contender->region_number();
      size_t num_regions = heap->num_regions();
      size_t from_idx_count = 0;
      // Only take the region if all inbound regions fit under the threshold;
      // those inbound regions become roots for the partial scan.
      if (matrix->enumerate_connected_to(index, num_regions,_from_idxs, from_idx_count,
                                         ShenandoahPartialInboundThreshold)) {
        if (maybe_add_heap_region(contender, collection_set)) {
          count++;
        }
        for (uint f = 0; f < from_idx_count; f++) {
          ShenandoahHeapRegion* r = regions->get(_from_idxs[f]);
          if (!r->is_root()) {
            r->set_root(true);
          }
        }
      }
    }
    collection_set->update_region_status();

    log_info(gc,ergo)("Regions: Active: " SIZE_FORMAT ", Target: " SIZE_FORMAT " (" SIZE_FORMAT "%%), In CSet: " SIZE_FORMAT,
                      active, target, ShenandoahLRUOldGenPercentage, collection_set->count());
  }

  // Trigger once the heap is at least 40% occupied AND allocations since the
  // last GC exceed ShenandoahLRUOldGenPercentage of the active heap.
  bool should_start_partial_gc() {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    if (heap->need_update_refs()) {
      // Cannot start partial if heap is not completely updated.
      return false;
    }

    size_t capacity  = heap->capacity();
    size_t used      = heap->used();
    size_t prev_used = heap->used_at_last_gc();

    if (used < prev_used) {
      // Major collection must have happened, "used" data is unreliable, wait for update.
      return false;
    }

    // For now don't start until we are 40% full
    size_t allocated = used - prev_used;
    size_t active    = heap->regions()->active_regions() * ShenandoahHeapRegion::region_size_bytes();
    size_t threshold = active * ShenandoahLRUOldGenPercentage / 100;
    // NOTE(review): computed via double arithmetic (active * 0.4) and
    // truncated back to size_t -- an integer "active * 40 / 100" would be
    // exact; confirm intent before changing.
    size_t minimum   = active * 0.4;

    bool result = ((used > minimum) && (allocated > threshold));

    FormatBuffer<> msg("%s. Capacity: " SIZE_FORMAT "M, Used: " SIZE_FORMAT "M, Previous Used: " SIZE_FORMAT
                       "M, Allocated: " SIZE_FORMAT "M, Threshold: " SIZE_FORMAT "M, Minimum: " SIZE_FORMAT "M",
                       result ? "Partial GC triggered" : "Partial GC skipped",
                       capacity/M, used/M, prev_used/M, allocated/M, threshold/M, minimum/M);

    // Info when triggering, trace otherwise.
    if (result) {
      log_info(gc,ergo)("%s", msg.buffer());
    } else {
      log_trace(gc,ergo)("%s", msg.buffer());
    }
    return result;
   }

};
1288 
1289 
// Construct the policy: size heap regions, run the common CollectorPolicy
// initialization, then instantiate the heuristics pair (major, and for the
// partial modes a minor one) selected by -XX:ShenandoahGCHeuristics.
// Exits the VM on an unknown name or a locked (diagnostic/experimental)
// heuristics without the corresponding unlock flag.
ShenandoahCollectorPolicy::ShenandoahCollectorPolicy() :
  _cycle_counter(0),
  _successful_cm(0),
  _degenerated_cm(0),
  _successful_uprefs(0),
  _degenerated_uprefs(0),
  _in_shutdown(0)
{

  ShenandoahHeapRegion::setup_heap_region_size(initial_heap_byte_size(), max_heap_byte_size());

  initialize_all();

  _tracer = new (ResourceObj::C_HEAP, mtGC) ShenandoahTracer();
  _user_requested_gcs = 0;
  _allocation_failure_gcs = 0;

  if (ShenandoahGCHeuristics != NULL) {
    _minor_heuristics = NULL;
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      _heuristics = new ShenandoahAggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "dynamic") == 0) {
      _heuristics = new ShenandoahDynamicHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      _heuristics = new ShenandoahPassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "continuous") == 0) {
      _heuristics = new ShenandoahContinuousHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "connected") == 0) {
      // Partial modes: adaptive major heuristics plus a partial minor one.
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahPartialConnectedHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "generational") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahGenerationalPartialHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "LRU") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
      _minor_heuristics = new ShenandoahLRUPartialHeuristics();
     } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }

    // Locked-down heuristics require the matching unlock flag.
    if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _heuristics->name()));
    }
    if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _heuristics->name()));
    }
    if (_minor_heuristics != NULL && _minor_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _minor_heuristics->name()));
    }
    if (_minor_heuristics != NULL && _minor_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _minor_heuristics->name()));
    }

    if (_minor_heuristics != NULL) {
      log_info(gc, init)("Shenandoah heuristics: %s minor with %s major",
                         _minor_heuristics->name(), _heuristics->name());
    } else {
      log_info(gc, init)("Shenandoah heuristics: %s",
                         _heuristics->name());
    }
    _heuristics->print_thresholds();
  } else {
      // ShenandoahGCHeuristics has a default value, so it should never be NULL.
      ShouldNotReachHere();
  }
}
1365 
// Downcast accessor used by the CollectorPolicy framework.
ShenandoahCollectorPolicy* ShenandoahCollectorPolicy::as_pgc_policy() {
  return this;
}
1369 
// Shenandoah always uses its own barrier set.
BarrierSet::Name ShenandoahCollectorPolicy::barrier_set_name() {
  return BarrierSet::ShenandoahBarrierSet;
}
1373 
// Not implemented: Shenandoah does not route allocations through the policy.
HeapWord* ShenandoahCollectorPolicy::mem_allocate_work(size_t size,
                                                       bool is_tlab,
                                                       bool* gc_overhead_limit_was_exceeded) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
1380 
// Not implemented: allocation-failure handling lives in the heap, not here.
HeapWord* ShenandoahCollectorPolicy::satisfy_failed_allocation(size_t size, bool is_tlab) {
  guarantee(false, "Not using this policy feature yet.");
  return NULL;
}
1385 
// Align both space and heap to the region size.
void ShenandoahCollectorPolicy::initialize_alignments() {

  // This is expected by our algorithm for ShenandoahHeap::heap_region_containing().
  _space_alignment = ShenandoahHeapRegion::region_size_bytes();
  _heap_alignment = ShenandoahHeapRegion::region_size_bytes();
}
1392 
// Called after the heap exists: lets heuristics do heap-dependent setup
// (e.g. the partial heuristics allocate per-region scratch space here).
void ShenandoahCollectorPolicy::post_heap_initialize() {
  _heuristics->initialize();
  if (_minor_heuristics != NULL) {
    _minor_heuristics->initialize();
  }
}
1399 
// Forward allocation accounting to the major heuristics.
void ShenandoahCollectorPolicy::record_bytes_allocated(size_t bytes) {
  _heuristics->record_bytes_allocated(bytes);
}
1403 
// Forward mark-start byte count to the major heuristics.
void ShenandoahCollectorPolicy::record_bytes_start_CM(size_t bytes) {
  _heuristics->record_bytes_start_CM(bytes);
}
1407 
// Forward mark-end byte count to the major heuristics.
void ShenandoahCollectorPolicy::record_bytes_end_CM(size_t bytes) {
  _heuristics->record_bytes_end_CM(bytes);
}
1411 
// Forward reclamation accounting to the major heuristics.
void ShenandoahCollectorPolicy::record_bytes_reclaimed(size_t bytes) {
  _heuristics->record_bytes_reclaimed(bytes);
}
1415 
// Count an explicit (System.gc()-style) GC and notify the heuristics.
void ShenandoahCollectorPolicy::record_user_requested_gc() {
  _heuristics->record_user_requested_gc();
  _user_requested_gcs++;
}
1420 
// Count an allocation-failure GC and notify the heuristics.
void ShenandoahCollectorPolicy::record_allocation_failure_gc() {
  _heuristics->record_allocation_failure_gc();
  _allocation_failure_gcs++;
}
1425 
// Delegate the concurrent-mark trigger decision to the major heuristics.
bool ShenandoahCollectorPolicy::should_start_concurrent_mark(size_t used,
                                                             size_t capacity) {
  return _heuristics->should_start_concurrent_mark(used, capacity);
}
1430 
// Delegate: should a cancelled concurrent mark be finished at pause?
bool ShenandoahCollectorPolicy::handover_cancelled_marking() {
  return _heuristics->handover_cancelled_marking();
}
1434 
// Delegate: should a cancelled update-refs be finished at pause?
bool ShenandoahCollectorPolicy::handover_cancelled_uprefs() {
  return _heuristics->handover_cancelled_uprefs();
}
1438 
1439 bool ShenandoahCollectorPolicy::update_refs() {
1440   if (_minor_heuristics != NULL && _minor_heuristics->update_refs()) {
1441     return true;
1442   }
1443   return _heuristics->update_refs();
1444 }
1445 
1446 bool ShenandoahCollectorPolicy::should_start_update_refs() {
1447   if (_minor_heuristics != NULL && _minor_heuristics->should_start_update_refs()) {
1448     return true;
1449   }
1450   return _heuristics->should_start_update_refs();
1451 }
1452 
// Count a successful concurrent mark and notify the heuristics.
void ShenandoahCollectorPolicy::record_cm_success() {
  _heuristics->record_cm_success();
  _successful_cm++;
}
1457 
// Count a concurrent mark that degenerated to a stop-the-world finish.
void ShenandoahCollectorPolicy::record_cm_degenerated() {
  _degenerated_cm++;
}
1461 
// Forward mark-cancellation to the heuristics (adjusts the free threshold).
void ShenandoahCollectorPolicy::record_cm_cancelled() {
  _heuristics->record_cm_cancelled();
}
1465 
// Count a successful concurrent update-refs and notify the heuristics.
void ShenandoahCollectorPolicy::record_uprefs_success() {
  _heuristics->record_uprefs_success();
  _successful_uprefs++;
}
1470 
// Count an update-refs that degenerated to a stop-the-world finish.
void ShenandoahCollectorPolicy::record_uprefs_degenerated() {
  _degenerated_uprefs++;
}
1474 
// Forward update-refs cancellation to the heuristics.
void ShenandoahCollectorPolicy::record_uprefs_cancelled() {
  _heuristics->record_uprefs_cancelled();
}
1478 
// Forward peak-occupancy sampling to the heuristics.
void ShenandoahCollectorPolicy::record_peak_occupancy() {
  _heuristics->record_peak_occupancy();
}
1482 
1483 void ShenandoahCollectorPolicy::choose_collection_set(ShenandoahCollectionSet* collection_set,
1484                                                       bool minor) {
1485   if (minor)
1486     _minor_heuristics->choose_collection_set(collection_set);
1487   else
1488     _heuristics->choose_collection_set(collection_set);
1489 }
1490 
// Delegate free-set selection to the major heuristics.
void ShenandoahCollectorPolicy::choose_free_set(ShenandoahFreeSet* free_set) {
   _heuristics->choose_free_set(free_set);
}
1494 
1495 
// Delegate: should this cycle process soft/weak/phantom references?
bool ShenandoahCollectorPolicy::process_references() {
  return _heuristics->process_references();
}
1499 
// Delegate: should this cycle unload classes?
bool ShenandoahCollectorPolicy::unload_classes() {
  return _heuristics->unload_classes();
}
1503 
// Number of GC cycles started so far (incremented in record_cycle_start()).
size_t ShenandoahCollectorPolicy::cycle_counter() const {
  return _cycle_counter;
}
1507 
// Forward phase timing to the major heuristics (feeds update-refs tuning).
void ShenandoahCollectorPolicy::record_phase_time(ShenandoahPhaseTimings::Phase phase, double secs) {
  _heuristics->record_phase_time(phase, secs);
}
1511 
1512 bool ShenandoahCollectorPolicy::should_start_partial_gc() {
1513   if (_minor_heuristics != NULL) {
1514     return _minor_heuristics->should_start_partial_gc();
1515   } else {
1516     return false; // no minor heuristics -> no partial gc
1517   }
1518 }
1519 
1520 bool ShenandoahCollectorPolicy::can_do_partial_gc() {
1521   if (_minor_heuristics != NULL) {
1522     return _minor_heuristics->can_do_partial_gc();
1523   } else {
1524     return false; // no minor heuristics -> no partial gc
1525   }
1526 }
1527 
// Bump the cycle counter and notify the heuristics that a cycle began.
void ShenandoahCollectorPolicy::record_cycle_start() {
  _cycle_counter++;
  _heuristics->record_cycle_start();
}
1532 
// Notify the heuristics that a cycle finished.
void ShenandoahCollectorPolicy::record_cycle_end() {
  _heuristics->record_cycle_end();
}
1536 
// Mark VM shutdown with release semantics, paired with the acquiring load
// in is_at_shutdown().
void ShenandoahCollectorPolicy::record_shutdown() {
  OrderAccess::release_store_fence(&_in_shutdown, 1);
}
1540 
// Acquiring read of the shutdown flag set by record_shutdown().
bool ShenandoahCollectorPolicy::is_at_shutdown() {
  return OrderAccess::load_acquire(&_in_shutdown) == 1;
}
1544 
// Print cumulative GC trigger/outcome counters collected by the record_*
// methods above.
void ShenandoahCollectorPolicy::print_gc_stats(outputStream* out) const {
  out->print_cr("" SIZE_FORMAT " allocation failure and " SIZE_FORMAT " user requested GCs", _allocation_failure_gcs, _user_requested_gcs);
  out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated concurrent markings", _successful_cm, _degenerated_cm);
  out->print_cr("" SIZE_FORMAT " successful and " SIZE_FORMAT " degenerated update references  ", _successful_uprefs, _degenerated_uprefs);
}