1 /*
   2  * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 #include "memory/universe.hpp"
  27 
  28 #include "gc/shared/gcArguments.hpp"
  29 #include "gc/shared/gcTimer.hpp"
  30 #include "gc/shared/gcTraceTime.inline.hpp"
  31 #include "gc/shared/memAllocator.hpp"
  32 #include "gc/shared/parallelCleaning.hpp"
  33 #include "gc/shared/plab.hpp"
  34 
  35 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  36 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  37 #include "gc/shenandoah/shenandoahForwarding.hpp"
  38 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  39 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  40 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  41 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  42 #include "gc/shenandoah/shenandoahControlThread.hpp"
  43 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  44 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  45 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  46 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  47 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  48 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  49 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  50 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  51 #include "gc/shenandoah/shenandoahMetrics.hpp"
  52 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  53 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  54 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  55 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  56 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  57 #include "gc/shenandoah/shenandoahUtils.hpp"
  58 #include "gc/shenandoah/shenandoahVerifier.hpp"
  59 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  60 #include "gc/shenandoah/shenandoahVMOperations.hpp"
  61 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  62 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  63 #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
  64 #include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
  65 #include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
  66 #include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
  67 #include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
  68 #include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"
  69 #include "memory/metaspace.hpp"
  70 #include "oops/compressedOops.inline.hpp"
  71 #include "runtime/globals.hpp"
  72 #include "runtime/interfaceSupport.inline.hpp"
  73 #include "runtime/safepointMechanism.hpp"
  74 #include "runtime/vmThread.hpp"
  75 #include "services/mallocTracker.hpp"
  76 
  77 #ifdef ASSERT
  78 template <class T>
  79 void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
  80   T o = RawAccess<>::oop_load(p);
  81   if (! CompressedOops::is_null(o)) {
  82     oop obj = CompressedOops::decode_not_null(o);
  83     shenandoah_assert_not_forwarded(p, obj);
  84   }
  85 }
  86 
  87 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
  88 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_work(p); }
  89 #endif
  90 
  91 class ShenandoahPretouchHeapTask : public AbstractGangTask {
  92 private:
  93   ShenandoahRegionIterator _regions;
  94   const size_t _page_size;
  95 public:
  96   ShenandoahPretouchHeapTask(size_t page_size) :
  97     AbstractGangTask("Shenandoah Pretouch Heap"),
  98     _page_size(page_size) {}
  99 
 100   virtual void work(uint worker_id) {
 101     ShenandoahHeapRegion* r = _regions.next();
 102     while (r != NULL) {
 103       os::pretouch_memory(r->bottom(), r->end(), _page_size);
 104       r = _regions.next();
 105     }
 106   }
 107 };
 108 
 109 class ShenandoahPretouchBitmapTask : public AbstractGangTask {
 110 private:
 111   ShenandoahRegionIterator _regions;
 112   char* _bitmap_base;
 113   const size_t _bitmap_size;
 114   const size_t _page_size;
 115 public:
 116   ShenandoahPretouchBitmapTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
 117     AbstractGangTask("Shenandoah Pretouch Bitmap"),
 118     _bitmap_base(bitmap_base),
 119     _bitmap_size(bitmap_size),
 120     _page_size(page_size) {}
 121 
 122   virtual void work(uint worker_id) {
 123     ShenandoahHeapRegion* r = _regions.next();
 124     while (r != NULL) {
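           // Pretouch the slice of the mark bitmap that corresponds to this region; bitmap
           // offsets scale down from heap offsets by MarkBitMap::heap_map_factor().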
 125       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 126       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 127       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 128 
 129       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end, _page_size);
 130 
 131       r = _regions.next();
 132     }
 133   }
 134 };
 135 
 136 jint ShenandoahHeap::initialize() {
 137   ShenandoahForwarding::initial_checks();
 138 
 139   initialize_heuristics();
 140 
 141   //
 142   // Figure out heap sizing
 143   //
 144 
 145   size_t init_byte_size = InitialHeapSize;
 146   size_t min_byte_size  = MinHeapSize;
 147   size_t max_byte_size  = MaxHeapSize;
 148   size_t heap_alignment = HeapAlignment;
 149 
 150   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 151 
 152   if (ShenandoahAlwaysPreTouch) {
 153     // Enabled pre-touch means the entire heap is committed right away.
 154     init_byte_size = max_byte_size;
 155   }
 156 
 157   Universe::check_alignment(max_byte_size,  reg_size_bytes, "Shenandoah heap");
 158   Universe::check_alignment(init_byte_size, reg_size_bytes, "Shenandoah heap");
 159 
 160   _num_regions = ShenandoahHeapRegion::region_count();
 161 
 162   size_t num_committed_regions = init_byte_size / reg_size_bytes;
 163   num_committed_regions = MIN2(num_committed_regions, _num_regions);
 164   assert(num_committed_regions <= _num_regions, "sanity");
 165   _initial_size = num_committed_regions * reg_size_bytes;
 166 
 167   size_t num_min_regions = min_byte_size / reg_size_bytes;
 168   num_min_regions = MIN2(num_min_regions, _num_regions);
 169   assert(num_min_regions <= _num_regions, "sanity");
 170   _minimum_size = num_min_regions * reg_size_bytes;
 171 
 172   _committed = _initial_size;
 173 
 174   size_t heap_page_size   = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 175   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 176 
 177   //
 178   // Reserve and commit memory for heap
 179   //
 180 
 181   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size, heap_alignment);
 182   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 183   _heap_region = MemRegion((HeapWord*)heap_rs.base(), heap_rs.size() / HeapWordSize);
 184   _heap_region_special = heap_rs.special();
 185 
 186   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 187          "Misaligned heap: " PTR_FORMAT, p2i(base()));
 188 
 189   ReservedSpace sh_rs = heap_rs.first_part(max_byte_size);
 190   if (!_heap_region_special) {
 191     os::commit_memory_or_exit(sh_rs.base(), _initial_size, heap_alignment, false,
 192                               "Cannot commit heap memory");
 193   }
 194 
 195   //
 196   // Reserve and commit memory for bitmap(s)
 197   //
 198 
 199   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 200   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 201 
 202   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 203 
 204   guarantee(bitmap_bytes_per_region != 0,
 205             "Bitmap bytes per region should not be zero");
 206   guarantee(is_power_of_2(bitmap_bytes_per_region),
 207             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 208 
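       // A bitmap "slice" is the granule at which bitmap memory is committed and uncommitted:
       // either one bitmap page covers several regions, or one region's bitmap spans a whole
       // number of pages.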
 209   if (bitmap_page_size > bitmap_bytes_per_region) {
 210     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 211     _bitmap_bytes_per_slice = bitmap_page_size;
 212   } else {
 213     _bitmap_regions_per_slice = 1;
 214     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 215   }
 216 
 217   guarantee(_bitmap_regions_per_slice >= 1,
 218             "Should have at least one region per slice: " SIZE_FORMAT,
 219             _bitmap_regions_per_slice);
 220 
 221   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 222             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 223             _bitmap_bytes_per_slice, bitmap_page_size);
 224 
 225   ReservedSpace bitmap(_bitmap_size, bitmap_page_size);
 226   MemTracker::record_virtual_memory_type(bitmap.base(), mtGC);
 227   _bitmap_region = MemRegion((HeapWord*) bitmap.base(), bitmap.size() / HeapWordSize);
 228   _bitmap_region_special = bitmap.special();
 229 
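       // Commit only the bitmap slices that cover the initially committed regions, rounded up
       // to whole slices, but never more than the whole bitmap.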
 230   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 231                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 232   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 233   if (!_bitmap_region_special) {
 234     os::commit_memory_or_exit((char *) _bitmap_region.start(), bitmap_init_commit, bitmap_page_size, false,
 235                               "Cannot commit bitmap memory");
 236   }
 237 
 238   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 239 
 240   if (ShenandoahVerify) {
 241     ReservedSpace verify_bitmap(_bitmap_size, bitmap_page_size);
 242     if (!verify_bitmap.special()) {
 243       os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), bitmap_page_size, false,
 244                                 "Cannot commit verification bitmap memory");
 245     }
 246     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 247     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 248     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 249     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 250   }
 251 
 252   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 253   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 254   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 255   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 256   _aux_bitmap_region_special = aux_bitmap.special();
 257   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 258 
 259   //
 260   // Create regions and region sets
 261   //
 262 
 263   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 264   _free_set = new ShenandoahFreeSet(this, _num_regions);
 265   _collection_set = new ShenandoahCollectionSet(this, sh_rs.base(), sh_rs.size());
 266 
 267   {
 268     ShenandoahHeapLocker locker(lock());
 269 
 270     size_t size_words = ShenandoahHeapRegion::region_size_words();
 271 
 272     for (size_t i = 0; i < _num_regions; i++) {
 273       HeapWord* start = (HeapWord*)sh_rs.base() + size_words * i;
 274       bool is_committed = i < num_committed_regions;
 275       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this, start, size_words, i, is_committed);
 276 
 277       _marking_context->initialize_top_at_mark_start(r);
 278       _regions[i] = r;
 279       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 280     }
 281 
 282     // Initialize to complete
 283     _marking_context->mark_complete();
 284 
 285     _free_set->rebuild();
 286   }
 287 
 288   if (ShenandoahAlwaysPreTouch) {
 289     assert(!AlwaysPreTouch, "Should have been overridden");
 290 
 291     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 292     // before initialize() below zeroes it with the initializing thread. For any given region,
 293     // we touch the region and the corresponding bitmaps from the same thread.
 294     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 295 
 296     size_t pretouch_heap_page_size = heap_page_size;
 297     size_t pretouch_bitmap_page_size = bitmap_page_size;
 298 
 299 #ifdef LINUX
 300     // UseTransparentHugePages would madvise that backing memory can be coalesced into huge
 301     // pages. But the kernel needs to know that every small page is used, in order to coalesce
 302     // them into huge ones. Therefore, we need to pretouch with smaller pages.
 303     if (UseTransparentHugePages) {
 304       pretouch_heap_page_size = (size_t)os::vm_page_size();
 305       pretouch_bitmap_page_size = (size_t)os::vm_page_size();
 306     }
 307 #endif
 308 
 309     // OS memory managers may want to coalesce back-to-back pages. Make their jobs
 310     // simpler by pre-touching contiguous spaces (heap and bitmap) separately.
 311 
 312     log_info(gc, init)("Pretouch bitmap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
 313                        _num_regions, pretouch_bitmap_page_size);
 314     ShenandoahPretouchBitmapTask bcl(bitmap.base(), _bitmap_size, pretouch_bitmap_page_size);
 315     _workers->run_task(&bcl);
 316 
 317     log_info(gc, init)("Pretouch heap: " SIZE_FORMAT " regions, " SIZE_FORMAT " bytes page",
 318                        _num_regions, pretouch_heap_page_size);
 319     ShenandoahPretouchHeapTask hcl(pretouch_heap_page_size);
 320     _workers->run_task(&hcl);
 321   }
 322 
 323   //
 324   // Initialize the rest of GC subsystems
 325   //
 326 
 327   _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
 328   for (uint worker = 0; worker < _max_workers; worker++) {
 329     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
 330     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
 331   }
 332 
 333   // The call below uses facilities (the SATB* machinery) that live in G1, but probably
 334   // belong in a shared location.
 335   ShenandoahBarrierSet::satb_mark_queue_set().initialize(this,
 336                                                          SATB_Q_CBL_mon,
 337                                                          20 /* G1SATBProcessCompletedThreshold */,
 338                                                          60 /* G1SATBBufferEnqueueingThresholdPercent */);
 339 
 340   _monitoring_support = new ShenandoahMonitoringSupport(this);
 341   _phase_timings = new ShenandoahPhaseTimings();
 342   ShenandoahStringDedup::initialize();
 343   ShenandoahCodeRoots::initialize();
 344 
 345   if (ShenandoahAllocationTrace) {
 346     _alloc_tracker = new ShenandoahAllocTracker();
 347   }
 348 
 349   if (ShenandoahPacing) {
 350     _pacer = new ShenandoahPacer(this);
 351     _pacer->setup_for_idle();
 352   } else {
 353     _pacer = NULL;
 354   }
 355 
 356   _traversal_gc = heuristics()->can_do_traversal_gc() ?
 357                   new ShenandoahTraversalGC(this, _num_regions) :
 358                   NULL;
 359 
 360   _control_thread = new ShenandoahControlThread();
 361 
 362   log_info(gc, init)("Initialize Shenandoah heap: " SIZE_FORMAT "%s initial, " SIZE_FORMAT "%s min, " SIZE_FORMAT "%s max",
 363                      byte_size_in_proper_unit(_initial_size),  proper_unit_for_byte_size(_initial_size),
 364                      byte_size_in_proper_unit(_minimum_size),  proper_unit_for_byte_size(_minimum_size),
 365                      byte_size_in_proper_unit(max_capacity()), proper_unit_for_byte_size(max_capacity())
 366   );
 367 
 368   log_info(gc, init)("Safepointing mechanism: %s",
 369                      SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
 370                      (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
 371 
 372   return JNI_OK;
 373 }
 374 
 375 void ShenandoahHeap::initialize_heuristics() {
 376   if (ShenandoahGCHeuristics != NULL) {
 377     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
 378       _heuristics = new ShenandoahAggressiveHeuristics();
 379     } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
 380       _heuristics = new ShenandoahStaticHeuristics();
 381     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 382       _heuristics = new ShenandoahAdaptiveHeuristics();
 383     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 384       _heuristics = new ShenandoahPassiveHeuristics();
 385     } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
 386       _heuristics = new ShenandoahCompactHeuristics();
 387     } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
 388       _heuristics = new ShenandoahTraversalHeuristics();
 389     } else {
 390       vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
 391     }
 392 
 393     if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 394       vm_exit_during_initialization(
 395               err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 396                       _heuristics->name()));
 397     }
 398     if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 399       vm_exit_during_initialization(
 400               err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 401                       _heuristics->name()));
 402     }
 403     log_info(gc, init)("Shenandoah heuristics: %s",
 404                        _heuristics->name());
 405   } else {
 406     ShouldNotReachHere();
 407   }
 408 
 409 }
 410 
 411 #ifdef _MSC_VER
 412 #pragma warning( push )
 413 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 414 #endif
 415 
 416 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 417   CollectedHeap(),
 418   _initial_size(0),
 419   _used(0),
 420   _committed(0),
 421   _bytes_allocated_since_gc_start(0),
 422   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 423   _workers(NULL),
 424   _safepoint_workers(NULL),
 425   _heap_region_special(false),
 426   _num_regions(0),
 427   _regions(NULL),
 428   _update_refs_iterator(this),
 429   _control_thread(NULL),
 430   _shenandoah_policy(policy),
 431   _heuristics(NULL),
 432   _free_set(NULL),
 433   _scm(new ShenandoahConcurrentMark()),
 434   _traversal_gc(NULL),
 435   _full_gc(new ShenandoahMarkCompact()),
 436   _pacer(NULL),
 437   _verifier(NULL),
 438   _alloc_tracker(NULL),
 439   _phase_timings(NULL),
 440   _monitoring_support(NULL),
 441   _memory_pool(NULL),
 442   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 443   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 444   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 445   _soft_ref_policy(),
 446   _log_min_obj_alignment_in_bytes(LogMinObjAlignmentInBytes),
 447   _ref_processor(NULL),
 448   _marking_context(NULL),
 449   _bitmap_size(0),
 450   _bitmap_regions_per_slice(0),
 451   _bitmap_bytes_per_slice(0),
 452   _bitmap_region_special(false),
 453   _aux_bitmap_region_special(false),
 454   _liveness_cache(NULL),
 455   _collection_set(NULL)
 456 {
 457   log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 458   log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
 459 
 460   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 461 
 462   _max_workers = MAX2(_max_workers, 1U);
 463   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 464                             /* are_GC_task_threads */ true,
 465                             /* are_ConcurrentGC_threads */ true);
 466   if (_workers == NULL) {
 467     vm_exit_during_initialization("Failed necessary allocation.");
 468   } else {
 469     _workers->initialize_workers();
 470   }
 471 
 472   if (ShenandoahParallelSafepointThreads > 1) {
 473     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 474                                                 ShenandoahParallelSafepointThreads,
 475                       /* are_GC_task_threads */ false,
 476                  /* are_ConcurrentGC_threads */ false);
 477     _safepoint_workers->initialize_workers();
 478   }
 479 }
 480 
 481 #ifdef _MSC_VER
 482 #pragma warning( pop )
 483 #endif
 484 
 485 class ShenandoahResetBitmapTask : public AbstractGangTask {
 486 private:
 487   ShenandoahRegionIterator _regions;
 488 
 489 public:
 490   ShenandoahResetBitmapTask() :
 491     AbstractGangTask("Parallel Reset Bitmap Task") {}
 492 
 493   void work(uint worker_id) {
 494     ShenandoahHeapRegion* region = _regions.next();
 495     ShenandoahHeap* heap = ShenandoahHeap::heap();
 496     ShenandoahMarkingContext* const ctx = heap->marking_context();
 497     while (region != NULL) {
 498       if (heap->is_bitmap_slice_committed(region)) {
 499         ctx->clear_bitmap(region);
 500       }
 501       region = _regions.next();
 502     }
 503   }
 504 };
 505 
 506 void ShenandoahHeap::reset_mark_bitmap() {
 507   assert_gc_workers(_workers->active_workers());
 508   mark_incomplete_marking_context();
 509 
 510   ShenandoahResetBitmapTask task;
 511   _workers->run_task(&task);
 512 }
 513 
 514 void ShenandoahHeap::print_on(outputStream* st) const {
 515   st->print_cr("Shenandoah Heap");
 516   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 517                max_capacity() / K, committed() / K, used() / K);
 518   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
 519                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 520 
 521   st->print("Status: ");
 522   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 523   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 524   if (is_evacuation_in_progress())           st->print("evacuating, ");
 525   if (is_update_refs_in_progress())          st->print("updating refs, ");
 526   if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
 527   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 528   if (is_full_gc_in_progress())              st->print("full gc, ");
 529   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 530 
 531   if (cancelled_gc()) {
 532     st->print("cancelled");
 533   } else {
 534     st->print("not cancelled");
 535   }
 536   st->cr();
 537 
 538   st->print_cr("Reserved region:");
 539   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 540                p2i(reserved_region().start()),
 541                p2i(reserved_region().end()));
 542 
 543   ShenandoahCollectionSet* cset = collection_set();
 544   st->print_cr("Collection set:");
 545   if (cset != NULL) {
 546     st->print_cr(" - map (vanilla): " PTR_FORMAT, p2i(cset->map_address()));
 547     st->print_cr(" - map (biased):  " PTR_FORMAT, p2i(cset->biased_map_address()));
 548   } else {
 549     st->print_cr(" (NULL)");
 550   }
 551 
 552   st->cr();
 553   MetaspaceUtils::print_on(st);
 554 
 555   if (Verbose) {
 556     print_heap_regions_on(st);
 557   }
 558 }
 559 
 560 class ShenandoahInitWorkerGCLABClosure : public ThreadClosure {
 561 public:
 562   void do_thread(Thread* thread) {
 563     assert(thread != NULL, "Sanity");
 564     assert(thread->is_Worker_thread(), "Only worker thread expected");
 565     ShenandoahThreadLocalData::initialize_gclab(thread);
 566   }
 567 };
 568 
 569 void ShenandoahHeap::post_initialize() {
 570   CollectedHeap::post_initialize();
 571   MutexLocker ml(Threads_lock);
 572 
 573   ShenandoahInitWorkerGCLABClosure init_gclabs;
 574   _workers->threads_do(&init_gclabs);
 575 
 576   // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
 577   // Now, we let the WorkGang initialize the gclab when a new worker is created.
 578   _workers->set_initialize_gclab();
 579 
 580   _scm->initialize(_max_workers);
 581   _full_gc->initialize(_gc_timer);
 582 
 583   ref_processing_init();
 584 
 585   _heuristics->initialize();
 586 }
 587 
 588 size_t ShenandoahHeap::used() const {
 589   return OrderAccess::load_acquire(&_used);
 590 }
 591 
 592 size_t ShenandoahHeap::committed() const {
 593   OrderAccess::acquire();
 594   return _committed;
 595 }
 596 
 597 void ShenandoahHeap::increase_committed(size_t bytes) {
 598   assert_heaplock_or_safepoint();
 599   _committed += bytes;
 600 }
 601 
 602 void ShenandoahHeap::decrease_committed(size_t bytes) {
 603   assert_heaplock_or_safepoint();
 604   _committed -= bytes;
 605 }
 606 
 607 void ShenandoahHeap::increase_used(size_t bytes) {
 608   Atomic::add(bytes, &_used);
 609 }
 610 
 611 void ShenandoahHeap::set_used(size_t bytes) {
 612   OrderAccess::release_store_fence(&_used, bytes);
 613 }
 614 
 615 void ShenandoahHeap::decrease_used(size_t bytes) {
 616   assert(used() >= bytes, "never decrease heap size by more than we've left");
 617   Atomic::sub(bytes, &_used);
 618 }
 619 
 620 void ShenandoahHeap::increase_allocated(size_t bytes) {
 621   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 622 }
 623 
 624 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 625   size_t bytes = words * HeapWordSize;
 626   if (!waste) {
 627     increase_used(bytes);
 628   }
 629   increase_allocated(bytes);
 630   if (ShenandoahPacing) {
 631     control_thread()->pacing_notify_alloc(words);
 632     if (waste) {
 633       pacer()->claim_for_alloc(words, true);
 634     }
 635   }
 636 }
 637 
 638 size_t ShenandoahHeap::capacity() const {
 639   return committed();
 640 }
 641 
 642 size_t ShenandoahHeap::max_capacity() const {
 643   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 644 }
 645 
 646 size_t ShenandoahHeap::min_capacity() const {
 647   return _minimum_size;
 648 }
 649 
 650 size_t ShenandoahHeap::initial_capacity() const {
 651   return _initial_size;
 652 }
 653 
 654 bool ShenandoahHeap::is_in(const void* p) const {
 655   HeapWord* heap_base = (HeapWord*) base();
 656   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 657   return p >= heap_base && p < last_region_end;
 658 }
 659 
 660 void ShenandoahHeap::op_uncommit(double shrink_before) {
 661   assert (ShenandoahUncommit, "should be enabled");
 662 
 663   // The application allocates from the beginning of the heap, and the GC allocates at
 664   // the end of it. It is more efficient to uncommit from the end, so that the application
 665   // keeps using the already committed regions near the start. GC allocations are much less
 666   // frequent, and can therefore absorb the cost of committing that memory again.
 667 
 668   size_t count = 0;
 669   for (size_t i = num_regions(); i > 0; i--) { // care about size_t underflow
 670     ShenandoahHeapRegion* r = get_region(i - 1);
 671     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 672       ShenandoahHeapLocker locker(lock());
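           // Re-check under the heap lock: the region may have been allocated into between
           // the unlocked check above and acquiring the lock.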
 673       if (r->is_empty_committed()) {
 674         // Do not uncommit below minimal capacity
 675         if (committed() < min_capacity() + ShenandoahHeapRegion::region_size_bytes()) {
 676           break;
 677         }
 678 
 679         r->make_uncommitted();
 680         count++;
 681       }
 682     }
 683     SpinPause(); // allow allocators to take the lock
 684   }
 685 
 686   if (count > 0) {
 687     control_thread()->notify_heap_changed();
 688   }
 689 }
 690 
 691 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 692   // New object should fit the GCLAB size
 693   size_t min_size = MAX2(size, PLAB::min_size());
 694 
 695   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 696   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 697   new_size = MIN2(new_size, PLAB::max_size());
 698   new_size = MAX2(new_size, PLAB::min_size());
 699 
 700   // Record the new heuristic value even if we take a shortcut below. This captures
 701   // the case when moderately-sized objects always take the shortcut. At some point,
 702   // the heuristics should catch up with them.
 703   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 704 
 705   if (new_size < size) {
 706     // New size still does not fit the object. Fall back to shared allocation.
 707     // This avoids retiring perfectly good GCLABs when we encounter a large object.
 708     return NULL;
 709   }
 710 
 711   // Retire current GCLAB, and allocate a new one.
 712   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 713   gclab->retire();
 714 
 715   size_t actual_size = 0;
 716   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 717   if (gclab_buf == NULL) {
 718     return NULL;
 719   }
 720 
 721   assert (size <= actual_size, "allocation should fit");
 722 
 723   if (ZeroTLAB) {
 724     // ..and clear it.
 725     Copy::zero_to_words(gclab_buf, actual_size);
 726   } else {
 727     // ...and zap just allocated object.
 728 #ifdef ASSERT
 729     // Skip mangling the space corresponding to the object header to
 730     // ensure that the returned space is not considered parsable by
 731     // any concurrent GC thread.
 732     size_t hdr_size = oopDesc::header_size();
 733     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 734 #endif // ASSERT
 735   }
 736   gclab->set_buf(gclab_buf, actual_size);
 737   return gclab->allocate(size);
 738 }
 739 
 740 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 741                                             size_t requested_size,
 742                                             size_t* actual_size) {
 743   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(min_size, requested_size);
 744   HeapWord* res = allocate_memory(req);
 745   if (res != NULL) {
 746     *actual_size = req.actual_size();
 747   } else {
 748     *actual_size = 0;
 749   }
 750   return res;
 751 }
 752 
 753 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 754                                              size_t word_size,
 755                                              size_t* actual_size) {
 756   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 757   HeapWord* res = allocate_memory(req);
 758   if (res != NULL) {
 759     *actual_size = req.actual_size();
 760   } else {
 761     *actual_size = 0;
 762   }
 763   return res;
 764 }
 765 
 766 ShenandoahHeap* ShenandoahHeap::heap() {
 767   CollectedHeap* heap = Universe::heap();
 768   assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
 769   assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
 770   return (ShenandoahHeap*) heap;
 771 }
 772 
 773 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
 774   CollectedHeap* heap = Universe::heap();
 775   return (ShenandoahHeap*) heap;
 776 }
 777 
 778 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 779   ShenandoahAllocTrace trace_alloc(req.size(), req.type());
 780 
 781   intptr_t pacer_epoch = 0;
 782   bool in_new_region = false;
 783   HeapWord* result = NULL;
 784 
 785   if (req.is_mutator_alloc()) {
 786     if (ShenandoahPacing) {
 787       pacer()->pace_for_alloc(req.size());
 788       pacer_epoch = pacer()->epoch();
 789     }
 790 
 791     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 792       result = allocate_memory_under_lock(req, in_new_region);
 793     }
 794 
 795     // If the allocation failed, block until the control thread has reacted, then retry the allocation.
 796     //
 797     // It might happen that one of the threads requesting allocation unblocks
 798     // long after the GC happened, only to fail the second allocation, because
 799     // other threads have already depleted the free storage. In this case, a better
 800     // strategy is to try again, as long as GC makes progress.
 801     //
 802     // After that, we need to make sure the allocation was retried after at least one
 803     // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
 804 
 805     size_t tries = 0;
 806 
 807     while (result == NULL && _progress_last_gc.is_set()) {
 808       tries++;
 809       control_thread()->handle_alloc_failure(req.size());
 810       result = allocate_memory_under_lock(req, in_new_region);
 811     }
 812 
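         // GC is no longer making progress: keep retrying a bounded number of times, enough
         // (per the comment above) to span at least one Full GC.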
 813     while (result == NULL && tries <= ShenandoahFullGCThreshold) {
 814       tries++;
 815       control_thread()->handle_alloc_failure(req.size());
 816       result = allocate_memory_under_lock(req, in_new_region);
 817     }
 818 
 819   } else {
 820     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 821     result = allocate_memory_under_lock(req, in_new_region);
 822     // Do not call handle_alloc_failure() here, because we cannot block.
 823     // The allocation failure would be handled by the LRB slowpath with handle_alloc_failure_evac().
 824   }
 825 
 826   if (in_new_region) {
 827     control_thread()->notify_heap_changed();
 828   }
 829 
 830   if (result != NULL) {
 831     size_t requested = req.size();
 832     size_t actual = req.actual_size();
 833 
 834     assert (req.is_lab_alloc() || (requested == actual),
 835             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 836             ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual);
 837 
 838     if (req.is_mutator_alloc()) {
 839       notify_mutator_alloc_words(actual, false);
 840 
 841       // If we requested more than we were granted, give the rest back to pacer.
 842       // This only matters if we are in the same pacing epoch: do not try to unpace
 843       // over the budget for the other phase.
 844       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 845         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 846       }
 847     } else {
 848       increase_used(actual*HeapWordSize);
 849     }
 850   }
 851 
 852   return result;
 853 }
 854 
 855 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 856   ShenandoahHeapLocker locker(lock());
 857   return _free_set->allocate(req, in_new_region);
 858 }
 859 
 860 class ShenandoahMemAllocator : public MemAllocator {
 861 private:
 862   MemAllocator& _initializer;
 863 public:
 864   ShenandoahMemAllocator(MemAllocator& initializer, Klass* klass, size_t word_size, Thread* thread) :
 865   MemAllocator(klass, word_size + ShenandoahForwarding::word_size(), thread),
 866     _initializer(initializer) {}
 867 
 868 protected:
 869   virtual HeapWord* mem_allocate(Allocation& allocation) const {
 870     HeapWord* result = MemAllocator::mem_allocate(allocation);
 871     // Initialize the Brooks forwarding pointer in the word reserved just before the object
 872     if (result != NULL) {
 873       result += ShenandoahForwarding::word_size();
 874       ShenandoahForwarding::initialize(oop(result));
 875       assert(! ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targeted region");
 876     }
 877     return result;
 878   }
 879 
 880   virtual oop initialize(HeapWord* mem) const {
 881      return _initializer.initialize(mem);
 882   }
 883 };
 884 
 885 oop ShenandoahHeap::obj_allocate(Klass* klass, int size, TRAPS) {
 886   ObjAllocator initializer(klass, size, THREAD);
 887   ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
 888   return allocator.allocate();
 889 }
 890 
 891 oop ShenandoahHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
 892   ObjArrayAllocator initializer(klass, size, length, do_zero, THREAD);
 893   ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
 894   return allocator.allocate();
 895 }
 896 
 897 oop ShenandoahHeap::class_allocate(Klass* klass, int size, TRAPS) {
 898   ClassAllocator initializer(klass, size, THREAD);
 899   ShenandoahMemAllocator allocator(initializer, klass, size, THREAD);
 900   return allocator.allocate();
 901 }
 902 
 903 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 904                                         bool*  gc_overhead_limit_was_exceeded) {
 905   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size);
 906   return allocate_memory(req);
 907 }
 908 
 909 MetaWord* ShenandoahHeap::satisfy_failed_metadata_allocation(ClassLoaderData* loader_data,
 910                                                              size_t size,
 911                                                              Metaspace::MetadataType mdtype) {
 912   MetaWord* result;
 913 
 914   // Inform metaspace OOM to GC heuristics if class unloading is possible.
 915   if (heuristics()->can_unload_classes()) {
 916     ShenandoahHeuristics* h = heuristics();
 917     h->record_metaspace_oom();
 918   }
 919 
 920   // Expand and retry allocation
 921   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 922   if (result != NULL) {
 923     return result;
 924   }
 925 
 926   // Start full GC
 927   collect(GCCause::_metadata_GC_clear_soft_refs);
 928 
 929   // Retry allocation
 930   result = loader_data->metaspace_non_null()->allocate(size, mdtype);
 931   if (result != NULL) {
 932     return result;
 933   }
 934 
 935   // Expand and retry allocation
 936   result = loader_data->metaspace_non_null()->expand_and_allocate(size, mdtype);
 937   if (result != NULL) {
 938     return result;
 939   }
 940 
 941   // Out of memory
 942   return NULL;
 943 }
 944 
 945 void ShenandoahHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
 946   HeapWord* obj = tlab_post_allocation_setup(start);
 947   CollectedHeap::fill_with_object(obj, end);
 948 }
 949 
 950 size_t ShenandoahHeap::min_dummy_object_size() const {
 951   return CollectedHeap::min_dummy_object_size() + ShenandoahForwarding::word_size();
 952 }
 953 
 954 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 955 private:
 956   ShenandoahHeap* const _heap;
 957   Thread* const _thread;
 958 public:
 959   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 960     _heap(heap), _thread(Thread::current()) {}
 961 
 962   void do_object(oop p) {
 963     shenandoah_assert_marked(NULL, p);
 964     if (oopDesc::equals_raw(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
 965       _heap->evacuate_object(p, _thread);
 966     }
 967   }
 968 };
 969 
 970 class ShenandoahEvacuationTask : public AbstractGangTask {
 971 private:
 972   ShenandoahHeap* const _sh;
 973   ShenandoahCollectionSet* const _cs;
 974   bool _concurrent;
 975 public:
 976   ShenandoahEvacuationTask(ShenandoahHeap* sh,
 977                            ShenandoahCollectionSet* cs,
 978                            bool concurrent) :
 979     AbstractGangTask("Parallel Evacuation Task"),
 980     _sh(sh),
 981     _cs(cs),
 982     _concurrent(concurrent)
 983   {}
 984 
 985   void work(uint worker_id) {
 986     if (_concurrent) {
 987       ShenandoahConcurrentWorkerSession worker_session(worker_id);
 988       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 989       ShenandoahEvacOOMScope oom_evac_scope;
 990       do_work();
 991     } else {
 992       ShenandoahParallelWorkerSession worker_session(worker_id);
 993       ShenandoahEvacOOMScope oom_evac_scope;
 994       do_work();
 995     }
 996   }
 997 
 998 private:
 999   void do_work() {
1000     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
1001     ShenandoahHeapRegion* r;
1002     while ((r =_cs->claim_next()) != NULL) {
1003       assert(r->has_live(), "all-garbage regions are reclaimed early");
1004       _sh->marked_object_iterate(r, &cl);
1005 
1006       if (ShenandoahPacing) {
1007         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1008       }
1009 
1010       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
1011         break;
1012       }
1013     }
1014   }
1015 };
1016 
1017 void ShenandoahHeap::trash_cset_regions() {
1018   ShenandoahHeapLocker locker(lock());
1019 
1020   ShenandoahCollectionSet* set = collection_set();
1021   ShenandoahHeapRegion* r;
1022   set->clear_current_index();
1023   while ((r = set->next()) != NULL) {
1024     r->make_trash();
1025   }
1026   collection_set()->clear();
1027 }
1028 
1029 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1030   st->print_cr("Heap Regions:");
1031   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1032   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1033   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
1034   st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
1035 
1036   for (size_t i = 0; i < num_regions(); i++) {
1037     get_region(i)->print_on(st);
1038   }
1039 }
1040 
1041 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1042   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1043 
1044   oop humongous_obj = oop(start->bottom() + ShenandoahForwarding::word_size());
1045   size_t size = humongous_obj->size() + ShenandoahForwarding::word_size();
1046   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1047   size_t index = start->region_number() + required_regions - 1;
1048 
1049   assert(!start->has_live(), "liveness must be zero");
1050 
1051   for(size_t i = 0; i < required_regions; i++) {
1052     // Reclaim from the tail. Otherwise, an assertion fails when printing the region to the trace log,
1053     // as it expects that every region belongs to a humongous sequence starting with a humongous start region.
1054     ShenandoahHeapRegion* region = get_region(index --);
1055 
1056     assert(region->is_humongous(), "expect correct humongous start or continuation");
1057     assert(!region->is_cset(), "Humongous region should not be in collection set");
1058 
1059     region->make_trash_immediate();
1060   }
1061 }
1062 
1063 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1064 public:
1065   void do_thread(Thread* thread) {
1066     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1067     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1068     gclab->retire();
1069   }
1070 };
1071 
1072 void ShenandoahHeap::make_parsable(bool retire_tlabs) {
1073   if (UseTLAB) {
1074     CollectedHeap::ensure_parsability(retire_tlabs);
1075   }
1076   ShenandoahRetireGCLABClosure cl;
1077   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1078     cl.do_thread(t);
1079   }
1080   workers()->threads_do(&cl);
1081 }
1082 
1083 void ShenandoahHeap::resize_tlabs() {
1084   CollectedHeap::resize_all_tlabs();
1085 }
1086 
1087 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1088 private:
1089   ShenandoahRootEvacuator* _rp;
1090 
1091 public:
1092   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1093     AbstractGangTask("Shenandoah evacuate and update roots"),
1094     _rp(rp) {}
1095 
1096   void work(uint worker_id) {
1097     ShenandoahParallelWorkerSession worker_session(worker_id);
1098     ShenandoahEvacOOMScope oom_evac_scope;
1099     ShenandoahEvacuateUpdateRootsClosure cl;
1100     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1101     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1102   }
1103 };
1104 
1105 void ShenandoahHeap::evacuate_and_update_roots() {
1106 #if defined(COMPILER2) || INCLUDE_JVMCI
1107   DerivedPointerTable::clear();
1108 #endif
1109   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1110 
1111   {
1112     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1113     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1114     workers()->run_task(&roots_task);
1115   }
1116 
1117 #if defined(COMPILER2) || INCLUDE_JVMCI
1118   DerivedPointerTable::update_pointers();
1119 #endif
1120 }
1121 
1122 // Returns size in bytes
1123 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1124   if (ShenandoahElasticTLAB) {
1125     // With Elastic TLABs, return the max allowed size, and let the allocation path
1126     // figure out the safe size for current allocation.
1127     return ShenandoahHeapRegion::max_tlab_size_bytes();
1128   } else {
1129     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1130   }
1131 }
1132 
1133 size_t ShenandoahHeap::max_tlab_size() const {
1134   // Returns size in words
1135   return ShenandoahHeapRegion::max_tlab_size_words();
1136 }
1137 
1138 class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
1139 public:
1140   void do_thread(Thread* thread) {
1141     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1142     gclab->retire();
1143     if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1144       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1145     }
1146   }
1147 };
1148 
1149 void ShenandoahHeap::retire_and_reset_gclabs() {
1150   ShenandoahRetireAndResetGCLABClosure cl;
1151   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1152     cl.do_thread(t);
1153   }
1154   workers()->threads_do(&cl);
1155 }
1156 
1157 void ShenandoahHeap::collect(GCCause::Cause cause) {
1158   control_thread()->request_gc(cause);
1159 }
1160 
1161 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1162   //assert(false, "Shouldn't need to do full collections");
1163 }
1164 
1165 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1166   Space* sp = heap_region_containing(addr);
1167   if (sp != NULL) {
1168     return sp->block_start(addr);
1169   }
1170   return NULL;
1171 }
1172 
1173 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1174   Space* sp = heap_region_containing(addr);
1175   return sp->block_is_obj(addr);
1176 }
1177 
1178 jlong ShenandoahHeap::millis_since_last_gc() {
1179   double v = heuristics()->time_since_last_gc() * 1000;
1180   assert(0 <= v && v <= max_jlong, "value should fit: %f", v);
1181   return (jlong)v;
1182 }
1183 
1184 void ShenandoahHeap::prepare_for_verify() {
1185   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1186     make_parsable(false);
1187   }
1188 }
1189 
1190 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1191   workers()->print_worker_threads_on(st);
1192   if (ShenandoahStringDedup::is_enabled()) {
1193     ShenandoahStringDedup::print_worker_threads_on(st);
1194   }
1195 }
1196 
1197 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1198   workers()->threads_do(tcl);
1199   if (_safepoint_workers != NULL) {
1200     _safepoint_workers->threads_do(tcl);
1201   }
1202   if (ShenandoahStringDedup::is_enabled()) {
1203     ShenandoahStringDedup::threads_do(tcl);
1204   }
1205 }
1206 
1207 void ShenandoahHeap::print_tracing_info() const {
1208   LogTarget(Info, gc, stats) lt;
1209   if (lt.is_enabled()) {
1210     ResourceMark rm;
1211     LogStream ls(lt);
1212 
1213     phase_timings()->print_on(&ls);
1214 
1215     ls.cr();
1216     ls.cr();
1217 
1218     shenandoah_policy()->print_gc_stats(&ls);
1219 
1220     ls.cr();
1221     ls.cr();
1222 
1223     if (ShenandoahPacing) {
1224       pacer()->print_on(&ls);
1225     }
1226 
1227     ls.cr();
1228     ls.cr();
1229 
1230     if (ShenandoahAllocationTrace) {
1231       assert(alloc_tracker() != NULL, "Must be");
1232       alloc_tracker()->print_on(&ls);
1233     } else {
1234       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1235     }
1236   }
1237 }
1238 
1239 void ShenandoahHeap::verify(VerifyOption vo) {
1240   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1241     if (ShenandoahVerify) {
1242       verifier()->verify_generic(vo);
1243     } else {
1244       // TODO: Consider allocating verification bitmaps on demand,
1245       // and turning this on unconditionally.
1246     }
1247   }
1248 }
1249 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1250   return _free_set->capacity();
1251 }
1252 
1253 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1254 private:
1255   MarkBitMap* _bitmap;
1256   Stack<oop,mtGC>* _oop_stack;
1257 
1258   template <class T>
1259   void do_oop_work(T* p) {
1260     T o = RawAccess<>::oop_load(p);
1261     if (!CompressedOops::is_null(o)) {
1262       oop obj = CompressedOops::decode_not_null(o);
1263       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1264       assert(oopDesc::is_oop(obj), "must be a valid oop");
1265       if (!_bitmap->is_marked((HeapWord*) obj)) {
1266         _bitmap->mark((HeapWord*) obj);
1267         _oop_stack->push(obj);
1268       }
1269     }
1270   }
1271 public:
1272   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1273     _bitmap(bitmap), _oop_stack(oop_stack) {}
1274   void do_oop(oop* p)       { do_oop_work(p); }
1275   void do_oop(narrowOop* p) { do_oop_work(p); }
1276 };
1277 
1278 /*
1279  * This is public API, used in preparation for object_iterate().
1280  * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
1281  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1282  * control, we call ShenandoahHeap::make_parsable().
1283  */
1284 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1285   // No-op.
1286 }
1287 
1288 /*
1289  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1290  *
1291  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1292  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1293  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1294  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1295  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1296  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1297  * wiped the bitmap in preparation for next marking).
1298  *
1299  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1300  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1301  * is allowed to report dead objects, but is not required to do so.
1302  */
1303 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1304   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1305   if (!_aux_bitmap_region_special && !os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1306     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1307     return;
1308   }
1309 
1310   // Reset bitmap
1311   _aux_bit_map.clear();
1312 
1313   Stack<oop,mtGC> oop_stack;
1314 
1315   // First, we process all GC roots. This populates the work stack with initial objects.
1316   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1317   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1318   CLDToOopClosure clds(&oops, ClassLoaderData::_claim_none);
1319   CodeBlobToOopClosure blobs(&oops, false);
1320   rp.process_all_roots(&oops, &clds, &blobs, NULL, 0);
1321 
1322   // Work through the oop stack to traverse heap.
1323   while (! oop_stack.is_empty()) {
1324     oop obj = oop_stack.pop();
1325     assert(oopDesc::is_oop(obj), "must be a valid oop");
1326     cl->do_object(obj);
1327     obj->oop_iterate(&oops);
1328   }
1329 
1330   assert(oop_stack.is_empty(), "should be empty");
1331 
1332   if (!_aux_bitmap_region_special && !os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1333     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1334   }
1335 }
1336 
1337 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1338   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1339   object_iterate(cl);
1340 }
1341 
1342 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1343   for (size_t i = 0; i < num_regions(); i++) {
1344     ShenandoahHeapRegion* current = get_region(i);
1345     blk->heap_region_do(current);
1346   }
1347 }
1348 
1349 class ShenandoahParallelHeapRegionTask : public AbstractGangTask {
1350 private:
1351   ShenandoahHeap* const _heap;
1352   ShenandoahHeapRegionClosure* const _blk;
1353 
1354   DEFINE_PAD_MINUS_SIZE(0, DEFAULT_CACHE_LINE_SIZE, sizeof(volatile size_t));
1355   volatile size_t _index;
1356   DEFINE_PAD_MINUS_SIZE(1, DEFAULT_CACHE_LINE_SIZE, 0);
1357 
1358 public:
1359   ShenandoahParallelHeapRegionTask(ShenandoahHeapRegionClosure* blk) :
1360           AbstractGangTask("Parallel Region Task"),
1361           _heap(ShenandoahHeap::heap()), _blk(blk), _index(0) {}
1362 
1363   void work(uint worker_id) {
1364     size_t stride = ShenandoahParallelRegionStride;
1365 
1366     size_t max = _heap->num_regions();
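         // Workers claim regions in strides: atomically advance the shared index, then process
         // the half-open range it reserved, capped at the total number of regions.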
1367     while (_index < max) {
1368       size_t cur = Atomic::add(stride, &_index) - stride;
1369       size_t start = cur;
1370       size_t end = MIN2(cur + stride, max);
1371       if (start >= max) break;
1372 
1373       for (size_t i = cur; i < end; i++) {
1374         ShenandoahHeapRegion* current = _heap->get_region(i);
1375         _blk->heap_region_do(current);
1376       }
1377     }
1378   }
1379 };
1380 
1381 void ShenandoahHeap::parallel_heap_region_iterate(ShenandoahHeapRegionClosure* blk) const {
1382   assert(blk->is_thread_safe(), "Only thread-safe closures here");
1383   if (num_regions() > ShenandoahParallelRegionStride) {
1384     ShenandoahParallelHeapRegionTask task(blk);
1385     workers()->run_task(&task);
1386   } else {
1387     heap_region_iterate(blk);
1388   }
1389 }
1390 
1391 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1392 private:
1393   ShenandoahMarkingContext* const _ctx;
1394 public:
1395   ShenandoahClearLivenessClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}
1396 
1397   void heap_region_do(ShenandoahHeapRegion* r) {
1398     if (r->is_active()) {
1399       r->clear_live_data();
1400       _ctx->capture_top_at_mark_start(r);
1401     } else {
1402       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number());
1403       assert(_ctx->top_at_mark_start(r) == r->top(),
1404              "Region " SIZE_FORMAT " should already have correct TAMS", r->region_number());
1405     }
1406   }
1407 
1408   bool is_thread_safe() { return true; }
1409 };
1410 
1411 void ShenandoahHeap::op_init_mark() {
1412   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1413   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
1414 
1415   assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1416   assert(!marking_context()->is_complete(), "should not be complete");
1417 
1418   if (ShenandoahVerify) {
1419     verifier()->verify_before_concmark();
1420   }
1421 
1422   if (VerifyBeforeGC) {
1423     Universe::verify();
1424   }
1425 
1426   set_concurrent_mark_in_progress(true);
1427   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1428   {
1429     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1430     make_parsable(true);
1431   }
1432 
1433   {
1434     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1435     ShenandoahClearLivenessClosure clc;
1436     parallel_heap_region_iterate(&clc);
1437   }
1438 
1439   // Make above changes visible to worker threads
1440   OrderAccess::fence();
1441 
1442   concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
1443 
1444   if (UseTLAB) {
1445     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1446     resize_tlabs();
1447   }
1448 
1449   if (ShenandoahPacing) {
1450     pacer()->setup_for_mark();
1451   }
1452 }
1453 
1454 void ShenandoahHeap::op_mark() {
1455   concurrent_mark()->mark_from_roots();
1456 }
1457 
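     // Used after marking: anything allocated above TAMS during the cycle is counted
     // as implicitly live (see the call site in op_final_mark()).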
1458 class ShenandoahCompleteLivenessClosure : public ShenandoahHeapRegionClosure {
1459 private:
1460   ShenandoahMarkingContext* const _ctx;
1461 public:
1462   ShenandoahCompleteLivenessClosure() : _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}
1463 
1464   void heap_region_do(ShenandoahHeapRegion* r) {
1465     if (r->is_active()) {
1466       HeapWord *tams = _ctx->top_at_mark_start(r);
1467       HeapWord *top = r->top();
1468       if (top > tams) {
1469         r->increase_live_data_alloc_words(pointer_delta(top, tams));
1470       }
1471     } else {
1472       assert(!r->has_live(), "Region " SIZE_FORMAT " should have no live data", r->region_number());
1473       assert(_ctx->top_at_mark_start(r) == r->top(),
1474              "Region " SIZE_FORMAT " should have correct TAMS", r->region_number());
1475     }
1476   }
1477 
1478   bool is_thread_safe() { return true; }
1479 };
1480 
1481 void ShenandoahHeap::op_final_mark() {
1482   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1483 
1484   // It is critical that we
1485   // evacuate roots right after finishing marking, so that we don't
1486   // get unmarked objects in the roots.
1487 
1488   if (!cancelled_gc()) {
1489     concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
1490 
1491     // Degen may be caused by failed evacuation of roots
1492     if (is_degenerated_gc_in_progress() && has_forwarded_objects()) {
1493       concurrent_mark()->update_roots(ShenandoahPhaseTimings::degen_gc_update_roots);
1494     }
1495 
1496     if (ShenandoahVerify) {
1497       verifier()->verify_roots_no_forwarded();
1498     }
1499 
1500     stop_concurrent_marking();
1501 
1502     {
1503       ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
1504 
1505       // All allocations past TAMS are implicitly live, adjust the region data.
1506       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1507       ShenandoahCompleteLivenessClosure cl;
1508       parallel_heap_region_iterate(&cl);
1509     }
1510 
1511     {
1512       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1513 
1514       make_parsable(true);
1515 
1516       trash_cset_regions();
1517 
1518       {
1519         ShenandoahHeapLocker locker(lock());
1520         _collection_set->clear();
1521         _free_set->clear();
1522 
1523         heuristics()->choose_collection_set(_collection_set);
1524 
1525         _free_set->rebuild();
1526       }
1527     }
1528 
1529     // If collection set has candidates, start evacuation.
1530     // Otherwise, bypass the rest of the cycle.
1531     if (!collection_set()->is_empty()) {
1532       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1533 
1534       if (ShenandoahVerify) {
1535         verifier()->verify_before_evacuation();
1536       }
1537 
1538       set_evacuation_in_progress(true);
1539       // From here on, we need to update references.
1540       set_has_forwarded_objects(true);
1541 
1542       evacuate_and_update_roots();
1543 
1544       if (ShenandoahPacing) {
1545         pacer()->setup_for_evac();
1546       }
1547 
1548       if (ShenandoahVerify) {
1549         verifier()->verify_roots_no_forwarded();
1550         verifier()->verify_during_evacuation();
1551       }
1552     } else {
1553       if (ShenandoahVerify) {
1554         verifier()->verify_after_concmark();
1555       }
1556 
1557       if (VerifyAfterGC) {
1558         Universe::verify();
1559       }
1560     }
1561 
1562   } else {
1563     concurrent_mark()->cancel();
1564     stop_concurrent_marking();
1565 
1566     if (process_references()) {
1567       // Abandon reference processing right away: pre-cleaning must have failed.
1568       ReferenceProcessor *rp = ref_processor();
1569       rp->disable_discovery();
1570       rp->abandon_partial_discovery();
1571       rp->verify_no_references_recorded();
1572     }
1573   }
1574 }
1575 
1576 void ShenandoahHeap::op_final_evac() {
1577   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1578 
1579   set_evacuation_in_progress(false);
1580 
1581   retire_and_reset_gclabs();
1582 
1583   if (ShenandoahVerify) {
1584     verifier()->verify_after_evacuation();
1585   }
1586 
1587   if (VerifyAfterGC) {
1588     Universe::verify();
1589   }
1590 }
1591 
1592 void ShenandoahHeap::op_conc_evac() {
1593   ShenandoahEvacuationTask task(this, _collection_set, true);
1594   workers()->run_task(&task);
1595 }
1596 
1597 void ShenandoahHeap::op_stw_evac() {
1598   ShenandoahEvacuationTask task(this, _collection_set, false);
1599   workers()->run_task(&task);
1600 }
1601 
1602 void ShenandoahHeap::op_updaterefs() {
1603   update_heap_references(true);
1604 }
1605 
1606 void ShenandoahHeap::op_cleanup() {
1607   free_set()->recycle_trash();
1608 }
1609 
1610 void ShenandoahHeap::op_reset() {
1611   reset_mark_bitmap();
1612 }
1613 
1614 void ShenandoahHeap::op_preclean() {
1615   concurrent_mark()->preclean_weak_refs();
1616 }
1617 
1618 void ShenandoahHeap::op_init_traversal() {
1619   traversal_gc()->init_traversal_collection();
1620 }
1621 
1622 void ShenandoahHeap::op_traversal() {
1623   traversal_gc()->concurrent_traversal_collection();
1624 }
1625 
1626 void ShenandoahHeap::op_final_traversal() {
1627   traversal_gc()->final_traversal_collection();
1628 }
1629 
1630 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1631   ShenandoahMetricsSnapshot metrics;
1632   metrics.snap_before();
1633 
1634   full_gc()->do_it(cause);
1635   if (UseTLAB) {
1636     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1637     resize_all_tlabs();
1638   }
1639 
1640   metrics.snap_after();
1641   metrics.print();
1642 
1643   if (metrics.is_good_progress("Full GC")) {
1644     _progress_last_gc.set();
1645   } else {
1646     // Nothing to do. Tell the allocation path that we have failed to make
1647     // progress, and it can finally fail.
1648     _progress_last_gc.unset();
1649   }
1650 }
1651 
1652 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1653   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1654   // GC failure via the cancelled_gc() flag. So, if we detect the failure after
1655   // some phase, we have to upgrade the Degenerated GC to Full GC.
1656 
1657   clear_cancelled_gc();
1658 
1659   ShenandoahMetricsSnapshot metrics;
1660   metrics.snap_before();
1661 
1662   switch (point) {
1663     case _degenerated_traversal:
1664       {
1665         // Drop the collection set. Note: this leaves some already forwarded objects
1666         // behind, which may be problematic, see comments for ShenandoahEvacAssist
1667         // workarounds in ShenandoahTraversalHeuristics.
1668 
1669         ShenandoahHeapLocker locker(lock());
1670         collection_set()->clear_current_index();
1671         for (size_t i = 0; i < collection_set()->count(); i++) {
1672           ShenandoahHeapRegion* r = collection_set()->next();
1673           r->make_regular_bypass();
1674         }
1675         collection_set()->clear();
1676       }
1677       op_final_traversal();
1678       op_cleanup();
1679       return;
1680 
1681     // The cases below form a Duff's-device-like structure: they describe the actual
1682     // GC cycle, but enter it at different points, depending on which concurrent
1683     // phase had degenerated.
1684 
1685     case _degenerated_outside_cycle:
1686       // We have degenerated from outside the cycle, which means something is bad with
1687       // the heap, most probably heavy humongous fragmentation, or we are very low on free
1688       // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1689       // we can do the most aggressive degen cycle, which includes processing references and
1690       // class unloading, unless those features are explicitly disabled.
1691       //
1692       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1693       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1694       set_process_references(heuristics()->can_process_references());
1695       set_unload_classes(heuristics()->can_unload_classes());
1696 
1697       if (heuristics()->can_do_traversal_gc()) {
1698         // Not possible to degenerate from here, upgrade to Full GC right away.
1699         cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1700         op_degenerated_fail();
1701         return;
1702       }
1703 
1704       op_reset();
1705 
1706       op_init_mark();
1707       if (cancelled_gc()) {
1708         op_degenerated_fail();
1709         return;
1710       }
1711 
1712     case _degenerated_mark:
1713       op_final_mark();
1714       if (cancelled_gc()) {
1715         op_degenerated_fail();
1716         return;
1717       }
1718 
1719       op_cleanup();
1720 
1721     case _degenerated_evac:
1722       // If the heuristics decided we should do the cycle, this flag is set,
1723       // and we can do evacuation. Otherwise, this is the shortcut cycle.
1724       if (is_evacuation_in_progress()) {
1725 
1726         // Degeneration under oom-evac protocol might have left some objects in
1727         // collection set un-evacuated. Restart evacuation from the beginning to
1728         // capture all objects. For all the objects that are already evacuated,
1729         // it would be a simple check, which is supposed to be fast. This is also
1730         // safe to do even without degeneration, as CSet iterator is at beginning
1731         // in preparation for evacuation anyway.
1732         collection_set()->clear_current_index();
1733 
1734         op_stw_evac();
1735         if (cancelled_gc()) {
1736           op_degenerated_fail();
1737           return;
1738         }
1739       }
1740 
1741       // If the heuristics decided we should do the cycle, this flag is set,
1742       // and we need to do update-refs. Otherwise, this is the shortcut cycle.
1743       if (has_forwarded_objects()) {
1744         op_init_updaterefs();
1745         if (cancelled_gc()) {
1746           op_degenerated_fail();
1747           return;
1748         }
1749       }
1750 
1751     case _degenerated_updaterefs:
1752       if (has_forwarded_objects()) {
1753         op_final_updaterefs();
1754         if (cancelled_gc()) {
1755           op_degenerated_fail();
1756           return;
1757         }
1758       }
1759 
1760       op_cleanup();
1761       break;
1762 
1763     default:
1764       ShouldNotReachHere();
1765   }
1766 
1767   if (ShenandoahVerify) {
1768     verifier()->verify_after_degenerated();
1769   }
1770 
1771   if (VerifyAfterGC) {
1772     Universe::verify();
1773   }
1774 
1775   metrics.snap_after();
1776   metrics.print();
1777 
1778   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1779   // because that probably means the heap is overloaded and/or fragmented.
1780   if (!metrics.is_good_progress("Degenerated GC")) {
1781     _progress_last_gc.unset();
1782     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1783     op_degenerated_futile();
1784   } else {
1785     _progress_last_gc.set();
1786   }
1787 }
1788 
1789 void ShenandoahHeap::op_degenerated_fail() {
1790   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1791   shenandoah_policy()->record_degenerated_upgrade_to_full();
1792   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1793 }
1794 
1795 void ShenandoahHeap::op_degenerated_futile() {
1796   shenandoah_policy()->record_degenerated_upgrade_to_full();
1797   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1798 }
1799 
1800 void ShenandoahHeap::stop_concurrent_marking() {
1801   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
1802   set_concurrent_mark_in_progress(false);
1803   if (!cancelled_gc()) {
1804     // Marking finished without cancellation: clear the forwarded-objects flag
1805     // and mark the marking context as complete.
1806     set_has_forwarded_objects(false);
1807     mark_complete_marking_context();
1808   }
1809 }
1810 
1811 void ShenandoahHeap::force_satb_flush_all_threads() {
1812   if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
1813     // No need to flush SATBs
1814     return;
1815   }
1816 
1817   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1818     ShenandoahThreadLocalData::set_force_satb_flush(t, true);
1819   }
1820   // The threads are not "acquiring" their thread-local data, but it does not
1821   // hurt to "release" the updates here anyway.
1822   OrderAccess::fence();
1823 }
1824 
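     // Publish the global gc-state bits into each Java thread's thread-local copy;
     // GC barriers read the state from thread-local storage.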
1825 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1826   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1827     ShenandoahThreadLocalData::set_gc_state(t, state);
1828   }
1829 }
1830 
1831 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1832   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1833   _gc_state.set_cond(mask, value);
1834   set_gc_state_all_threads(_gc_state.raw_value());
1835 }
1836 
1837 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1838   if (has_forwarded_objects()) {
1839     set_gc_state_mask(MARKING | UPDATEREFS, in_progress);
1840   } else {
1841     set_gc_state_mask(MARKING, in_progress);
1842   }
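       // Arm/disarm SATB buffers on all threads; the second argument is the expected
       // previous active state.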
1843   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1844 }
1845 
1846 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1847    set_gc_state_mask(TRAVERSAL | HAS_FORWARDED | UPDATEREFS, in_progress);
1848    ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1849 }
1850 
1851 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1852   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1853   set_gc_state_mask(EVACUATION, in_progress);
1854 }
1855 
1856 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1857   // Initialize Brooks pointer for the next object
1858   HeapWord* result = obj + ShenandoahForwarding::word_size();
1859   ShenandoahForwarding::initialize(oop(result));
1860   return result;
1861 }
1862 
1863 void ShenandoahHeap::ref_processing_init() {
1864   assert(_max_workers > 0, "Sanity");
1865 
1866   _ref_processor =
1867     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
1868                            ParallelRefProcEnabled,  // MT processing
1869                            _max_workers,            // Degree of MT processing
1870                            true,                    // MT discovery
1871                            _max_workers,            // Degree of MT discovery
1872                            false,                   // Reference discovery is not atomic
1873                            NULL,                    // No closure, should be installed before use
1874                            true);                   // Scale worker threads
1875 
1876   shenandoah_assert_rp_isalive_not_installed();
1877 }
1878 
1879 GCTracer* ShenandoahHeap::tracer() {
1880   return shenandoah_policy()->tracer();
1881 }
1882 
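     // Note: the thread argument is ignored; this reports overall free-set usage
     // rather than per-thread TLAB usage.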
1883 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1884   return _free_set->used();
1885 }
1886 
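     // Attempt to move the cancellation flag from CANCELLABLE to CANCELLED with a CAS;
     // only the thread that performs the transition gets true. With suspendible workers
     // the flag can be transiently NOT_CANCELLED, in which case we spin and yield to a
     // potential safepoint.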
1887 bool ShenandoahHeap::try_cancel_gc() {
1888   while (true) {
1889     jbyte prev = _cancelled_gc.cmpxchg(CANCELLED, CANCELLABLE);
1890     if (prev == CANCELLABLE) return true;
1891     else if (prev == CANCELLED) return false;
1892     assert(ShenandoahSuspendibleWorkers, "should not get here when not using suspendible workers");
1893     assert(prev == NOT_CANCELLED, "must be NOT_CANCELLED");
1894     {
1895       // We need to provide a safepoint here, otherwise we might
1896       // spin forever if a SP is pending.
1897       ThreadBlockInVM sp(JavaThread::current());
1898       SpinPause();
1899     }
1900   }
1901 }
1902 
1903 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1904   if (try_cancel_gc()) {
1905     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1906     log_info(gc)("%s", msg.buffer());
1907     Events::log(Thread::current(), "%s", msg.buffer());
1908   }
1909 }
1910 
1911 uint ShenandoahHeap::max_workers() {
1912   return _max_workers;
1913 }
1914 
1915 void ShenandoahHeap::stop() {
1916   // The shutdown sequence should be able to terminate even when GC is running.
1917 
1918   // Step 0. Notify policy to disable event recording.
1919   _shenandoah_policy->record_shutdown();
1920 
1921   // Step 1. Notify control thread that we are in shutdown.
1922   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1923   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1924   control_thread()->prepare_for_graceful_shutdown();
1925 
1926   // Step 2. Notify GC workers that we are cancelling GC.
1927   cancel_gc(GCCause::_shenandoah_stop_vm);
1928 
1929   // Step 3. Wait until the GC control thread exits normally.
1930   control_thread()->stop();
1931 
1932   // Step 4. Stop String Dedup thread if it is active
1933   if (ShenandoahStringDedup::is_enabled()) {
1934     ShenandoahStringDedup::stop();
1935   }
1936 }
1937 
1938 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1939   assert(heuristics()->can_unload_classes(), "Class unloading should be enabled");
1940 
1941   ShenandoahGCPhase root_phase(full_gc ?
1942                                ShenandoahPhaseTimings::full_gc_purge :
1943                                ShenandoahPhaseTimings::purge);
1944 
1945   ShenandoahIsAliveSelector alive;
1946   BoolObjectClosure* is_alive = alive.is_alive_closure();
1947 
1948   bool purged_class;
1949 
1950   // Unload classes and purge SystemDictionary.
1951   {
1952     ShenandoahGCPhase phase(full_gc ?
1953                             ShenandoahPhaseTimings::full_gc_purge_class_unload :
1954                             ShenandoahPhaseTimings::purge_class_unload);
1955     purged_class = SystemDictionary::do_unloading(gc_timer());
1956   }
1957 
1958   {
1959     ShenandoahGCPhase phase(full_gc ?
1960                             ShenandoahPhaseTimings::full_gc_purge_par :
1961                             ShenandoahPhaseTimings::purge_par);
1962     uint active = _workers->active_workers();
1963     ParallelCleaningTask unlink_task(is_alive, active, purged_class, true);
1964     _workers->run_task(&unlink_task);
1965   }
1966 
1967   {
1968     ShenandoahGCPhase phase(full_gc ?
1969                       ShenandoahPhaseTimings::full_gc_purge_cldg :
1970                       ShenandoahPhaseTimings::purge_cldg);
1971     ClassLoaderDataGraph::purge();
1972   }
1973 }
1974 
1975 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1976   set_gc_state_mask(HAS_FORWARDED, cond);
1977 }
1978 
1979 void ShenandoahHeap::set_process_references(bool pr) {
1980   _process_references.set_cond(pr);
1981 }
1982 
1983 void ShenandoahHeap::set_unload_classes(bool uc) {
1984   _unload_classes.set_cond(uc);
1985 }
1986 
1987 bool ShenandoahHeap::process_references() const {
1988   return _process_references.is_set();
1989 }
1990 
1991 bool ShenandoahHeap::unload_classes() const {
1992   return _unload_classes.is_set();
1993 }
1994 
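     // These raw addresses are exported for generated code (e.g. barrier stubs) that
     // needs to test these flags directly.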
1995 address ShenandoahHeap::in_cset_fast_test_addr() {
1996   ShenandoahHeap* heap = ShenandoahHeap::heap();
1997   assert(heap->collection_set() != NULL, "Sanity");
1998   return (address) heap->collection_set()->biased_map_address();
1999 }
2000 
2001 address ShenandoahHeap::cancelled_gc_addr() {
2002   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
2003 }
2004 
2005 address ShenandoahHeap::gc_state_addr() {
2006   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2007 }
2008 
2009 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2010   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2011 }
2012 
2013 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2014   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2015 }
2016 
2017 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2018   _degenerated_gc_in_progress.set_cond(in_progress);
2019 }
2020 
2021 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2022   _full_gc_in_progress.set_cond(in_progress);
2023 }
2024 
2025 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2026   assert (is_full_gc_in_progress(), "should be");
2027   _full_gc_move_in_progress.set_cond(in_progress);
2028 }
2029 
2030 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2031   set_gc_state_mask(UPDATEREFS, in_progress);
2032 }
2033 
2034 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2035   ShenandoahCodeRoots::add_nmethod(nm);
2036 }
2037 
2038 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2039   ShenandoahCodeRoots::remove_nmethod(nm);
2040 }
2041 
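     // Pinning an object pins its whole region; pinned regions are not selected into
     // the collection set, so the object will not move while it is pinned.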
2042 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2043   ShenandoahHeapLocker locker(lock());
2044   heap_region_containing(o)->make_pinned();
2045   return o;
2046 }
2047 
2048 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2049   ShenandoahHeapLocker locker(lock());
2050   heap_region_containing(o)->make_unpinned();
2051 }
2052 
2053 GCTimer* ShenandoahHeap::gc_timer() const {
2054   return _gc_timer;
2055 }
2056 
2057 #ifdef ASSERT
2058 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2059   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2060 
2061   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2062     if (UseDynamicNumberOfGCThreads ||
2063         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2064       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2065     } else {
2066       // Use ParallelGCThreads inside safepoints
2067       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2068     }
2069   } else {
2070     if (UseDynamicNumberOfGCThreads ||
2071         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2072       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2073     } else {
2074       // Use ConcGCThreads outside safepoints
2075       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2076     }
2077   }
2078 }
2079 #endif
2080 
2081 ShenandoahVerifier* ShenandoahHeap::verifier() {
2082   guarantee(ShenandoahVerify, "Should be enabled");
2083   assert (_verifier != NULL, "sanity");
2084   return _verifier;
2085 }
2086 
2087 template<class T>
2088 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2089 private:
2090   T cl;
2091   ShenandoahHeap* _heap;
2092   ShenandoahRegionIterator* _regions;
2093   bool _concurrent;
2094 public:
2095   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2096     AbstractGangTask("Concurrent Update References Task"),
2097     cl(T()),
2098     _heap(ShenandoahHeap::heap()),
2099     _regions(regions),
2100     _concurrent(concurrent) {
2101   }
2102 
2103   void work(uint worker_id) {
2104     if (_concurrent) {
2105       ShenandoahConcurrentWorkerSession worker_session(worker_id);
2106       ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
2107       do_work();
2108     } else {
2109       ShenandoahParallelWorkerSession worker_session(worker_id);
2110       do_work();
2111     }
2112   }
2113 
2114 private:
2115   void do_work() {
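         // Claim regions from the shared iterator; for each active, non-cset region,
         // update references in marked objects up to the update-refs safe limit, report
         // progress to the pacer, and bail out if the GC has been cancelled.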
2116     ShenandoahHeapRegion* r = _regions->next();
2117     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2118     while (r != NULL) {
2119       HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
2120       assert (top_at_start_ur >= r->bottom(), "sanity");
2121       if (r->is_active() && !r->is_cset()) {
2122         _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur);
2123       }
2124       if (ShenandoahPacing) {
2125         _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
2126       }
2127       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2128         return;
2129       }
2130       r = _regions->next();
2131     }
2132   }
2133 };
2134 
2135 void ShenandoahHeap::update_heap_references(bool concurrent) {
2136   ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2137   workers()->run_task(&task);
2138 }
2139 
2140 void ShenandoahHeap::op_init_updaterefs() {
2141   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2142 
2143   set_evacuation_in_progress(false);
2144 
2145   retire_and_reset_gclabs();
2146 
2147   if (ShenandoahVerify) {
2148     verifier()->verify_before_updaterefs();
2149   }
2150 
2151   set_update_refs_in_progress(true);
2152   make_parsable(true);
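       // Record the current top of each region as the safe limit: the concurrent
       // update-refs pass only needs to visit objects allocated up to this pause.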
2153   for (uint i = 0; i < num_regions(); i++) {
2154     ShenandoahHeapRegion* r = get_region(i);
2155     r->set_concurrent_iteration_safe_limit(r->top());
2156   }
2157 
2158   // Reset iterator.
2159   _update_refs_iterator.reset();
2160 
2161   if (ShenandoahPacing) {
2162     pacer()->setup_for_updaterefs();
2163   }
2164 }
2165 
2166 void ShenandoahHeap::op_final_updaterefs() {
2167   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2168 
2169   // Check if there is left-over work, and finish it
2170   if (_update_refs_iterator.has_next()) {
2171     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2172 
2173     // Finish updating references where we left off.
2174     clear_cancelled_gc();
2175     update_heap_references(false);
2176   }
2177 
2178   // Clear cancelled GC, if set. On cancellation path, the block before would handle
2179   // everything. On degenerated paths, cancelled gc would not be set anyway.
2180   if (cancelled_gc()) {
2181     clear_cancelled_gc();
2182   }
2183   assert(!cancelled_gc(), "Should have been done right before");
2184 
2185   concurrent_mark()->update_roots(is_degenerated_gc_in_progress() ?
2186                                   ShenandoahPhaseTimings::degen_gc_update_roots:
2187                                   ShenandoahPhaseTimings::final_update_refs_roots);
2188 
2189   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2190 
2191   trash_cset_regions();
2192   set_has_forwarded_objects(false);
2193   set_update_refs_in_progress(false);
2194 
2195   if (ShenandoahVerify) {
2196     verifier()->verify_roots_no_forwarded();
2197     verifier()->verify_after_updaterefs();
2198   }
2199 
2200   if (VerifyAfterGC) {
2201     Universe::verify();
2202   }
2203 
2204   {
2205     ShenandoahHeapLocker locker(lock());
2206     _free_set->rebuild();
2207   }
2208 }
2209 
2210 #ifdef ASSERT
2211 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2212   _lock.assert_owned_by_current_thread();
2213 }
2214 
2215 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2216   _lock.assert_not_owned_by_current_thread();
2217 }
2218 
2219 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2220   _lock.assert_owned_by_current_thread_or_safepoint();
2221 }
2222 #endif
2223 
2224 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2225   print_on(st);
2226   print_heap_regions_on(st);
2227 }
2228 
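     // Several heap regions can share a single marking bitmap slice. A slice counts as
     // committed if any region mapping to it is committed; skip_self lets the caller ask
     // whether the slice would still be needed without the given region.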
2229 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2230   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2231 
2232   size_t regions_from = _bitmap_regions_per_slice * slice;
2233   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2234   for (size_t g = regions_from; g < regions_to; g++) {
2235     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2236     if (skip_self && g == r->region_number()) continue;
2237     if (get_region(g)->is_committed()) {
2238       return true;
2239     }
2240   }
2241   return false;
2242 }
2243 
2244 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2245   assert_heaplock_owned_by_current_thread();
2246 
2247   // Bitmaps in special regions do not need commits
2248   if (_bitmap_region_special) {
2249     return true;
2250   }
2251 
2252   if (is_bitmap_slice_committed(r, true)) {
2253     // Some other region from the group is already committed, meaning the bitmap
2254     // slice is already committed; exit right away.
2255     return true;
2256   }
2257 
2258   // Commit the bitmap slice:
2259   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2260   size_t off = _bitmap_bytes_per_slice * slice;
2261   size_t len = _bitmap_bytes_per_slice;
2262   if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
2263     return false;
2264   }
2265   return true;
2266 }
2267 
2268 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2269   assert_heaplock_owned_by_current_thread();
2270 
2271   // Bitmaps in special regions do not need uncommits
2272   if (_bitmap_region_special) {
2273     return true;
2274   }
2275 
2276   if (is_bitmap_slice_committed(r, true)) {
2277     // Some other region from the group is still committed, meaning the bitmap
2278     // slice should stay committed; exit right away.
2279     return true;
2280   }
2281 
2282   // Uncommit the bitmap slice:
2283   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2284   size_t off = _bitmap_bytes_per_slice * slice;
2285   size_t len = _bitmap_bytes_per_slice;
2286   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2287     return false;
2288   }
2289   return true;
2290 }
2291 
2292 void ShenandoahHeap::safepoint_synchronize_begin() {
2293   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2294     SuspendibleThreadSet::synchronize();
2295   }
2296 }
2297 
2298 void ShenandoahHeap::safepoint_synchronize_end() {
2299   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2300     SuspendibleThreadSet::desynchronize();
2301   }
2302 }
2303 
2304 void ShenandoahHeap::vmop_entry_init_mark() {
2305   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2306   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2307   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2308 
2309   try_inject_alloc_failure();
2310   VM_ShenandoahInitMark op;
2311   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2312 }
2313 
2314 void ShenandoahHeap::vmop_entry_final_mark() {
2315   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2316   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2317   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2318 
2319   try_inject_alloc_failure();
2320   VM_ShenandoahFinalMarkStartEvac op;
2321   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2322 }
2323 
2324 void ShenandoahHeap::vmop_entry_final_evac() {
2325   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2326   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2327   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2328 
2329   VM_ShenandoahFinalEvac op;
2330   VMThread::execute(&op); // jump to entry_final_evac under safepoint
2331 }
2332 
2333 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2334   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2335   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2336   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2337 
2338   try_inject_alloc_failure();
2339   VM_ShenandoahInitUpdateRefs op;
2340   VMThread::execute(&op);
2341 }
2342 
2343 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2344   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2345   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2346   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2347 
2348   try_inject_alloc_failure();
2349   VM_ShenandoahFinalUpdateRefs op;
2350   VMThread::execute(&op);
2351 }
2352 
2353 void ShenandoahHeap::vmop_entry_init_traversal() {
2354   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2355   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2356   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2357 
2358   try_inject_alloc_failure();
2359   VM_ShenandoahInitTraversalGC op;
2360   VMThread::execute(&op);
2361 }
2362 
2363 void ShenandoahHeap::vmop_entry_final_traversal() {
2364   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2365   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2366   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2367 
2368   try_inject_alloc_failure();
2369   VM_ShenandoahFinalTraversalGC op;
2370   VMThread::execute(&op);
2371 }
2372 
2373 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2374   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2375   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2376   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2377 
2378   try_inject_alloc_failure();
2379   VM_ShenandoahFullGC op(cause);
2380   VMThread::execute(&op);
2381 }
2382 
2383 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2384   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2385   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2386   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2387 
2388   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2389   VMThread::execute(&degenerated_gc);
2390 }
2391 
2392 void ShenandoahHeap::entry_init_mark() {
2393   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2394   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2395   const char* msg = init_mark_event_message();
2396   GCTraceTime(Info, gc) time(msg, gc_timer());
2397   EventMark em("%s", msg);
2398 
2399   ShenandoahWorkerScope scope(workers(),
2400                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2401                               "init marking");
2402 
2403   op_init_mark();
2404 }
2405 
2406 void ShenandoahHeap::entry_final_mark() {
2407   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2408   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2409   const char* msg = final_mark_event_message();
2410   GCTraceTime(Info, gc) time(msg, gc_timer());
2411   EventMark em("%s", msg);
2412 
2413   ShenandoahWorkerScope scope(workers(),
2414                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2415                               "final marking");
2416 
2417   op_final_mark();
2418 }
2419 
2420 void ShenandoahHeap::entry_final_evac() {
2421   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2422   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2423   static const char* msg = "Pause Final Evac";
2424   GCTraceTime(Info, gc) time(msg, gc_timer());
2425   EventMark em("%s", msg);
2426 
2427   op_final_evac();
2428 }
2429 
2430 void ShenandoahHeap::entry_init_updaterefs() {
2431   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2432   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2433 
2434   static const char* msg = "Pause Init Update Refs";
2435   GCTraceTime(Info, gc) time(msg, gc_timer());
2436   EventMark em("%s", msg);
2437 
2438   // No workers used in this phase, no setup required
2439 
2440   op_init_updaterefs();
2441 }
2442 
2443 void ShenandoahHeap::entry_final_updaterefs() {
2444   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2445   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2446 
2447   static const char* msg = "Pause Final Update Refs";
2448   GCTraceTime(Info, gc) time(msg, gc_timer());
2449   EventMark em("%s", msg);
2450 
2451   ShenandoahWorkerScope scope(workers(),
2452                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2453                               "final reference update");
2454 
2455   op_final_updaterefs();
2456 }
2457 
2458 void ShenandoahHeap::entry_init_traversal() {
2459   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2460   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2461 
2462   static const char* msg = "Pause Init Traversal";
2463   GCTraceTime(Info, gc) time(msg, gc_timer());
2464   EventMark em("%s", msg);
2465 
2466   ShenandoahWorkerScope scope(workers(),
2467                               ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
2468                               "init traversal");
2469 
2470   op_init_traversal();
2471 }
2472 
2473 void ShenandoahHeap::entry_final_traversal() {
2474   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2475   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2476 
2477   static const char* msg = "Pause Final Traversal";
2478   GCTraceTime(Info, gc) time(msg, gc_timer());
2479   EventMark em("%s", msg);
2480 
2481   ShenandoahWorkerScope scope(workers(),
2482                               ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
2483                               "final traversal");
2484 
2485   op_final_traversal();
2486 }
2487 
2488 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2489   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2490   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2491 
2492   static const char* msg = "Pause Full";
2493   GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2494   EventMark em("%s", msg);
2495 
2496   ShenandoahWorkerScope scope(workers(),
2497                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2498                               "full gc");
2499 
2500   op_full(cause);
2501 }
2502 
2503 void ShenandoahHeap::entry_degenerated(int point) {
2504   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2505   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2506 
2507   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2508   const char* msg = degen_event_message(dpoint);
2509   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2510   EventMark em("%s", msg);
2511 
2512   ShenandoahWorkerScope scope(workers(),
2513                               ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
2514                               "stw degenerated gc");
2515 
2516   set_degenerated_gc_in_progress(true);
2517   op_degenerated(dpoint);
2518   set_degenerated_gc_in_progress(false);
2519 }
2520 
2521 void ShenandoahHeap::entry_mark() {
2522   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2523 
2524   const char* msg = conc_mark_event_message();
2525   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2526   EventMark em("%s", msg);
2527 
2528   ShenandoahWorkerScope scope(workers(),
2529                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2530                               "concurrent marking");
2531 
2532   try_inject_alloc_failure();
2533   op_mark();
2534 }
2535 
2536 void ShenandoahHeap::entry_evac() {
2537   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2538   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2539 
2540   static const char* msg = "Concurrent evacuation";
2541   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2542   EventMark em("%s", msg);
2543 
2544   ShenandoahWorkerScope scope(workers(),
2545                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
2546                               "concurrent evacuation");
2547 
2548   try_inject_alloc_failure();
2549   op_conc_evac();
2550 }
2551 
2552 void ShenandoahHeap::entry_updaterefs() {
2553   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2554 
2555   static const char* msg = "Concurrent update references";
2556   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2557   EventMark em("%s", msg);
2558 
2559   ShenandoahWorkerScope scope(workers(),
2560                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
2561                               "concurrent reference update");
2562 
2563   try_inject_alloc_failure();
2564   op_updaterefs();
2565 }

2566 void ShenandoahHeap::entry_cleanup() {
2567   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2568 
2569   static const char* msg = "Concurrent cleanup";
2570   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2571   EventMark em("%s", msg);
2572 
2573   // This phase does not use workers, no need for setup
2574 
2575   try_inject_alloc_failure();
2576   op_cleanup();
2577 }
2578 
2579 void ShenandoahHeap::entry_reset() {
2580   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);
2581 
2582   static const char* msg = "Concurrent reset";
2583   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2584   EventMark em("%s", msg);
2585 
2586   ShenandoahWorkerScope scope(workers(),
2587                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
2588                               "concurrent reset");
2589 
2590   try_inject_alloc_failure();
2591   op_reset();
2592 }
2593 
2594 void ShenandoahHeap::entry_preclean() {
2595   if (ShenandoahPreclean && process_references()) {
2596     static const char* msg = "Concurrent precleaning";
2597     GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2598     EventMark em("%s", msg);
2599 
2600     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2601 
2602     ShenandoahWorkerScope scope(workers(),
2603                                 ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
2604                                 "concurrent preclean",
2605                                 /* check_workers = */ false);
2606 
2607     try_inject_alloc_failure();
2608     op_preclean();
2609   }
2610 }
2611 
2612 void ShenandoahHeap::entry_traversal() {
2613   static const char* msg = "Concurrent traversal";
2614   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2615   EventMark em("%s", msg);
2616 
2617   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2618 
2619   ShenandoahWorkerScope scope(workers(),
2620                               ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
2621                               "concurrent traversal");
2622 
2623   try_inject_alloc_failure();
2624   op_traversal();
2625 }
2626 
2627 void ShenandoahHeap::entry_uncommit(double shrink_before) {
2628   static const char *msg = "Concurrent uncommit";
2629   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2630   EventMark em("%s", msg);
2631 
2632   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);
2633 
2634   op_uncommit(shrink_before);
2635 }
2636 
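     // With ShenandoahAllocFailureALot, inject a synthetic allocation failure roughly
     // 5% of the time to exercise the allocation-failure handling paths.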
2637 void ShenandoahHeap::try_inject_alloc_failure() {
2638   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2639     _inject_alloc_failure.set();
2640     os::naked_short_sleep(1);
2641     if (cancelled_gc()) {
2642       log_info(gc)("Allocation failure was successfully injected");
2643     }
2644   }
2645 }
2646 
2647 bool ShenandoahHeap::should_inject_alloc_failure() {
2648   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2649 }
2650 
2651 void ShenandoahHeap::initialize_serviceability() {
2652   _memory_pool = new ShenandoahMemoryPool(this);
2653   _cycle_memory_manager.add_pool(_memory_pool);
2654   _stw_memory_manager.add_pool(_memory_pool);
2655 }
2656 
2657 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2658   GrowableArray<GCMemoryManager*> memory_managers(2);
2659   memory_managers.append(&_cycle_memory_manager);
2660   memory_managers.append(&_stw_memory_manager);
2661   return memory_managers;
2662 }
2663 
2664 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2665   GrowableArray<MemoryPool*> memory_pools(1);
2666   memory_pools.append(_memory_pool);
2667   return memory_pools;
2668 }
2669 
2670 MemoryUsage ShenandoahHeap::memory_usage() {
2671   return _memory_pool->get_memory_usage();
2672 }
2673 
2674 void ShenandoahHeap::enter_evacuation() {
2675   _oom_evac_handler.enter_evacuation();
2676 }
2677 
2678 void ShenandoahHeap::leave_evacuation() {
2679   _oom_evac_handler.leave_evacuation();
2680 }
2681 
2682 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2683   _heap(ShenandoahHeap::heap()),
2684   _index(0) {}
2685 
2686 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2687   _heap(heap),
2688   _index(0) {}
2689 
2690 void ShenandoahRegionIterator::reset() {
2691   _index = 0;
2692 }
2693 
2694 bool ShenandoahRegionIterator::has_next() const {
2695   return _index < _heap->num_regions();
2696 }
2697 
2698 char ShenandoahHeap::gc_state() const {
2699   return _gc_state.raw_value();
2700 }
2701 
2702 void ShenandoahHeap::deduplicate_string(oop str) {
2703   assert(java_lang_String::is_instance(str), "invariant");
2704 
2705   if (ShenandoahStringDedup::is_enabled()) {
2706     ShenandoahStringDedup::deduplicate(str);
2707   }
2708 }
2709 
2710 const char* ShenandoahHeap::init_mark_event_message() const {
2711   bool update_refs = has_forwarded_objects();
2712   bool proc_refs = process_references();
2713   bool unload_cls = unload_classes();
2714 
2715   if (update_refs && proc_refs && unload_cls) {
2716     return "Pause Init Mark (update refs) (process weakrefs) (unload classes)";
2717   } else if (update_refs && proc_refs) {
2718     return "Pause Init Mark (update refs) (process weakrefs)";
2719   } else if (update_refs && unload_cls) {
2720     return "Pause Init Mark (update refs) (unload classes)";
2721   } else if (proc_refs && unload_cls) {
2722     return "Pause Init Mark (process weakrefs) (unload classes)";
2723   } else if (update_refs) {
2724     return "Pause Init Mark (update refs)";
2725   } else if (proc_refs) {
2726     return "Pause Init Mark (process weakrefs)";
2727   } else if (unload_cls) {
2728     return "Pause Init Mark (unload classes)";
2729   } else {
2730     return "Pause Init Mark";
2731   }
2732 }
2733 
2734 const char* ShenandoahHeap::final_mark_event_message() const {
2735   bool update_refs = has_forwarded_objects();
2736   bool proc_refs = process_references();
2737   bool unload_cls = unload_classes();
2738 
2739   if (update_refs && proc_refs && unload_cls) {
2740     return "Pause Final Mark (update refs) (process weakrefs) (unload classes)";
2741   } else if (update_refs && proc_refs) {
2742     return "Pause Final Mark (update refs) (process weakrefs)";
2743   } else if (update_refs && unload_cls) {
2744     return "Pause Final Mark (update refs) (unload classes)";
2745   } else if (proc_refs && unload_cls) {
2746     return "Pause Final Mark (process weakrefs) (unload classes)";
2747   } else if (update_refs) {
2748     return "Pause Final Mark (update refs)";
2749   } else if (proc_refs) {
2750     return "Pause Final Mark (process weakrefs)";
2751   } else if (unload_cls) {
2752     return "Pause Final Mark (unload classes)";
2753   } else {
2754     return "Pause Final Mark";
2755   }
2756 }
2757 
2758 const char* ShenandoahHeap::conc_mark_event_message() const {
2759   bool update_refs = has_forwarded_objects();
2760   bool proc_refs = process_references();
2761   bool unload_cls = unload_classes();
2762 
2763   if (update_refs && proc_refs && unload_cls) {
2764     return "Concurrent marking (update refs) (process weakrefs) (unload classes)";
2765   } else if (update_refs && proc_refs) {
2766     return "Concurrent marking (update refs) (process weakrefs)";
2767   } else if (update_refs && unload_cls) {
2768     return "Concurrent marking (update refs) (unload classes)";
2769   } else if (proc_refs && unload_cls) {
2770     return "Concurrent marking (process weakrefs) (unload classes)";
2771   } else if (update_refs) {
2772     return "Concurrent marking (update refs)";
2773   } else if (proc_refs) {
2774     return "Concurrent marking (process weakrefs)";
2775   } else if (unload_cls) {
2776     return "Concurrent marking (unload classes)";
2777   } else {
2778     return "Concurrent marking";
2779   }
2780 }
2781 
2782 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2783   switch (point) {
2784     case _degenerated_unset:
2785       return "Pause Degenerated GC (<UNSET>)";
2786     case _degenerated_traversal:
2787       return "Pause Degenerated GC (Traversal)";
2788     case _degenerated_outside_cycle:
2789       return "Pause Degenerated GC (Outside of Cycle)";
2790     case _degenerated_mark:
2791       return "Pause Degenerated GC (Mark)";
2792     case _degenerated_evac:
2793       return "Pause Degenerated GC (Evacuation)";
2794     case _degenerated_updaterefs:
2795       return "Pause Degenerated GC (Update Refs)";
2796     default:
2797       ShouldNotReachHere();
2798       return "ERROR";
2799   }
2800 }
2801 
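     // Per-worker caches of per-region live data, accumulated during marking and
     // flushed into the regions by flush_liveness_cache() below.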
2802 jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2803 #ifdef ASSERT
2804   assert(_liveness_cache != NULL, "sanity");
2805   assert(worker_id < _max_workers, "sanity");
2806   for (uint i = 0; i < num_regions(); i++) {
2807     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2808   }
2809 #endif
2810   return _liveness_cache[worker_id];
2811 }
2812 
2813 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2814   assert(worker_id < _max_workers, "sanity");
2815   assert(_liveness_cache != NULL, "sanity");
2816   jushort* ld = _liveness_cache[worker_id];
2817   for (uint i = 0; i < num_regions(); i++) {
2818     ShenandoahHeapRegion* r = get_region(i);
2819     jushort live = ld[i];
2820     if (live > 0) {
2821       r->increase_live_data_gc_words(live);
2822       ld[i] = 0;
2823     }
2824   }
2825 }
2826 
2827 size_t ShenandoahHeap::obj_size(oop obj) const {
2828   return CollectedHeap::obj_size(obj) + ShenandoahForwarding::word_size();
2829 }
2830 
2831 ptrdiff_t ShenandoahHeap::cell_header_size() const {
2832   return ShenandoahForwarding::byte_size();
2833 }