1 /*
   2  * Copyright (c) 2013, 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/memAllocator.hpp"
  30 #include "gc/shared/parallelCleaning.hpp"
  31 #include "gc/shared/plab.hpp"
  32 
  33 #include "gc/shenandoah/brooksPointer.hpp"
  34 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  35 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  37 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  38 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  39 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahControlThread.hpp"
  41 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  43 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  44 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  45 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  46 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  47 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  48 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  49 #include "gc/shenandoah/shenandoahMetrics.hpp"
  50 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  51 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  52 #include "gc/shenandoah/shenandoahPacer.hpp"
  53 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  54 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  55 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  56 #include "gc/shenandoah/shenandoahUtils.hpp"
  57 #include "gc/shenandoah/shenandoahVerifier.hpp"
  58 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  59 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  60 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  61 #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
  62 #include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
  63 #include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
  64 #include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
  65 #include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
  66 #include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"
  67 
  68 #include "memory/metaspace.hpp"
  69 #include "runtime/vmThread.hpp"
  70 #include "services/mallocTracker.hpp"
  71 
  72 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  73 
  74 #ifdef ASSERT
  75 template <class T>
  76 void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
  77   T o = RawAccess<>::oop_load(p);
  78   if (! CompressedOops::is_null(o)) {
  79     oop obj = CompressedOops::decode_not_null(o);
  80     shenandoah_assert_not_forwarded(p, obj);
  81   }
  82 }
  83 
  84 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
  85 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_work(p); }
  86 #endif
  87 
  88 const char* ShenandoahHeap::name() const {
  89   return "Shenandoah";
  90 }
  91 
  92 class ShenandoahPretouchTask : public AbstractGangTask {
  93 private:
  94   ShenandoahRegionIterator _regions;
  95   const size_t _bitmap_size;
  96   const size_t _page_size;
  97   char* _bitmap0_base;
  98   char* _bitmap1_base;
  99 public:
 100   ShenandoahPretouchTask(char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
 101                          size_t page_size) :
 102     AbstractGangTask("Shenandoah PreTouch"),
 103     _bitmap_size(bitmap_size),
 104     _page_size(page_size),
 105     _bitmap0_base(bitmap0_base),
 106     _bitmap1_base(bitmap1_base) {
 107   }
 108 
 109   virtual void work(uint worker_id) {
 110     ShenandoahHeapRegion* r = _regions.next();
 111     while (r != NULL) {
 112       os::pretouch_memory(r->bottom(), r->end(), _page_size);
 113 
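      // Each region maps to a contiguous slice of the mark bitmap: one bitmap byte covers
      // MarkBitMap::heap_map_factor() heap bytes, so the slice for region N spans
      // [N * region_size / factor, (N + 1) * region_size / factor) within the bitmap.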
 114       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 115       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 117 
 118       os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);
 119       os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);
 120 
 121       r = _regions.next();
 122     }
 123   }
 124 };
 125 
 126 jint ShenandoahHeap::initialize() {
 127 
 128   BrooksPointer::initial_checks();
 129 
 130   initialize_heuristics();
 131 
 132   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 133   size_t max_byte_size = collector_policy()->max_heap_byte_size();
 134   size_t heap_alignment = collector_policy()->heap_alignment();
 135 
 136   if (ShenandoahAlwaysPreTouch) {
 137     // Enabled pre-touch means the entire heap is committed right away.
 138     init_byte_size = max_byte_size;
 139   }
 140 
 141   Universe::check_alignment(max_byte_size,
 142                             ShenandoahHeapRegion::region_size_bytes(),
 143                             "shenandoah heap");
 144   Universe::check_alignment(init_byte_size,
 145                             ShenandoahHeapRegion::region_size_bytes(),
 146                             "shenandoah heap");
 147 
 148   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
 149                                                  heap_alignment);
 150   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 151 
 152   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
 153 
 154   _num_regions = ShenandoahHeapRegion::region_count();
 155   size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
 156   _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
 157   _committed = _initial_size;
 158 
 159   log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
 160   if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
 161     vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
 162   }
 163 
 164   size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
 165   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 166 
 167   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 168   _free_set = new ShenandoahFreeSet(this, _num_regions);
 169 
 170   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
 171 
 172   if (ShenandoahPacing) {
 173     _pacer = new ShenandoahPacer(this);
 174     _pacer->setup_for_idle();
 175   } else {
 176     _pacer = NULL;
 177   }
 178 
 179   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));
 181 
 182   ShenandoahBarrierSet::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
 183 
  // The call below uses machinery (the SATB queue set) that currently lives in G1,
  // but probably belongs in a shared location.
 186   ShenandoahBarrierSet::satb_mark_queue_set().initialize(this,
 187                                                SATB_Q_CBL_mon,
 188                                                SATB_Q_FL_lock,
 189                                                20 /*G1SATBProcessCompletedThreshold */,
 190                                                60 /* G1SATBBufferEnqueueingThresholdPercent */,
 191                                                Shared_SATB_Q_lock);
 192 
 193   // Reserve space for prev and next bitmap.
 194   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 195   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 196   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 197   _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
 198 
 199   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 200 
 201   guarantee(bitmap_bytes_per_region != 0,
 202             "Bitmap bytes per region should not be zero");
 203   guarantee(is_power_of_2(bitmap_bytes_per_region),
 204             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 205 
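  // Commit and uncommit the mark bitmaps in page-granular "slices". If one bitmap page covers
  // several regions, a slice spans that many regions; otherwise a slice is the per-region
  // bitmap chunk. For example (assuming 2M regions, 64 heap bytes per bitmap byte, and 2M
  // large pages), a slice would cover 2M / 32K = 64 regions.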
 206   if (bitmap_page_size > bitmap_bytes_per_region) {
 207     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 208     _bitmap_bytes_per_slice = bitmap_page_size;
 209   } else {
 210     _bitmap_regions_per_slice = 1;
 211     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 212   }
 213 
 214   guarantee(_bitmap_regions_per_slice >= 1,
 215             "Should have at least one region per slice: " SIZE_FORMAT,
 216             _bitmap_regions_per_slice);
 217 
 218   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 219             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 220             _bitmap_bytes_per_slice, bitmap_page_size);
 221 
 222   ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
 223   MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
 224   _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
 225 
 226   ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
 227   MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
 228   _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
 229 
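  // Commit only the parts of the bitmaps that cover the initially committed regions,
  // rounded up to whole slices and capped at the full bitmap size.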
 230   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 231                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 232   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 233   os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
 234                             "couldn't allocate initial bitmap");
 235   os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
 236                             "couldn't allocate initial bitmap");
 237 
 238   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 239 
 240   if (ShenandoahVerify) {
 241     ReservedSpace verify_bitmap(_bitmap_size, page_size);
 242     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
 243                               "couldn't allocate verification bitmap");
 244     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 245     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 246     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 247     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 248   }
 249 
 250   _complete_marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap0_region, _num_regions);
 251   _next_marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap1_region, _num_regions);
 252 
 253   {
 254     ShenandoahHeapLocker locker(lock());
 255     for (size_t i = 0; i < _num_regions; i++) {
 256       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
 257                                                          (HeapWord*) pgc_rs.base() + reg_size_words * i,
 258                                                          reg_size_words,
 259                                                          i,
 260                                                          i < num_committed_regions);
 261 
 262       _complete_marking_context->set_top_at_mark_start(i, r->bottom());
 263       _next_marking_context->set_top_at_mark_start(i, r->bottom());
 264       _regions[i] = r;
 265       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 266     }
 267 
 268     _free_set->rebuild();
 269   }
 270 
 271   if (ShenandoahAlwaysPreTouch) {
 272     assert (!AlwaysPreTouch, "Should have been overridden");
 273 
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
 277     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 278 
 279     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 280                        _num_regions, page_size);
 281     ShenandoahPretouchTask cl(bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
 282     _workers->run_task(&cl);
 283   }
 284 
 286   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 287   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 288   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 289   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 290   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 291 
 292   _traversal_gc = heuristics()->can_do_traversal_gc() ?
 293                 new ShenandoahTraversalGC(this, _num_regions) :
 294                 NULL;
 295 
 296   _monitoring_support = new ShenandoahMonitoringSupport(this);
 297 
 298   _phase_timings = new ShenandoahPhaseTimings();
 299 
 300   if (ShenandoahAllocationTrace) {
 301     _alloc_tracker = new ShenandoahAllocTracker();
 302   }
 303 
 304   ShenandoahStringDedup::initialize();
 305 
 306   _control_thread = new ShenandoahControlThread();
 307 
 308   ShenandoahCodeRoots::initialize();
 309 
 310   log_info(gc, init)("Safepointing mechanism: %s",
 311                      SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
 312                      (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
 313 
 314   return JNI_OK;
 315 }
 316 
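// Select the heuristics implementation named by -XX:ShenandoahGCHeuristics (e.g. "adaptive",
// "static", "compact"). Diagnostic or experimental heuristics additionally require the
// corresponding -XX:+UnlockDiagnosticVMOptions / -XX:+UnlockExperimentalVMOptions flag.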
 317 void ShenandoahHeap::initialize_heuristics() {
 318   if (ShenandoahGCHeuristics != NULL) {
 319     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
 320       _heuristics = new ShenandoahAggressiveHeuristics();
 321     } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
 322       _heuristics = new ShenandoahStaticHeuristics();
 323     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 324       _heuristics = new ShenandoahAdaptiveHeuristics();
 325     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 326       _heuristics = new ShenandoahPassiveHeuristics();
 327     } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
 328       _heuristics = new ShenandoahCompactHeuristics();
 329     } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
 330       _heuristics = new ShenandoahTraversalHeuristics();
 331     } else {
 332       vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
 333     }
 334 
 335     if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 336       vm_exit_during_initialization(
 337               err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 338                       _heuristics->name()));
 339     }
 340     if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 341       vm_exit_during_initialization(
 342               err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 343                       _heuristics->name()));
 344     }
 345 
 346     if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
 347       vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
 348     }
 349     log_info(gc, init)("Shenandoah heuristics: %s",
 350                        _heuristics->name());
  } else {
    ShouldNotReachHere();
  }
}
 356 
 357 #ifdef _MSC_VER
 358 #pragma warning( push )
 359 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 360 #endif
 361 
 362 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 363   CollectedHeap(),
 364   _shenandoah_policy(policy),
 365   _soft_ref_policy(),
 366   _regions(NULL),
 367   _free_set(NULL),
 368   _collection_set(NULL),
 369   _update_refs_iterator(this),
 370   _scm(new ShenandoahConcurrentMark()),
 371   _full_gc(new ShenandoahMarkCompact()),
 372   _traversal_gc(NULL),
 373   _verifier(NULL),
 374   _pacer(NULL),
 375   _phase_timings(NULL),
 376   _alloc_tracker(NULL),
 377   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 378   _safepoint_workers(NULL),
 379   _used(0),
 380   _complete_marking_context(NULL),
 381   _next_marking_context(NULL),
 382   _bytes_allocated_since_gc_start(0),
 383   _ref_processor(NULL),
 384   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 385   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 386   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 387   _memory_pool(NULL),
 388 #ifdef ASSERT
 389   _heap_expansion_count(0),
 390 #endif
 391   _alloc_seq_at_last_gc_start(0),
 392   _alloc_seq_at_last_gc_end(0),
 393   _used_at_last_gc(0) {
 394   log_info(gc, init)("GC threads: " UINT32_FORMAT " parallel, " UINT32_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 395   log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
 396 
 397   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 398 
 399   _max_workers = MAX2(_max_workers, 1U);
 400   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 401                             /* are_GC_task_threads */true,
 402                             /* are_ConcurrentGC_threads */false);
 403   if (_workers == NULL) {
 404     vm_exit_during_initialization("Failed necessary allocation.");
 405   } else {
 406     _workers->initialize_workers();
 407   }
 408 
 409   if (ParallelSafepointCleanupThreads > 1) {
 410     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 411                                                 ParallelSafepointCleanupThreads,
 412                                                 false, false);
 413     _safepoint_workers->initialize_workers();
 414   }
 415 }
 416 
 417 #ifdef _MSC_VER
 418 #pragma warning( pop )
 419 #endif
 420 
 421 class ShenandoahResetNextBitmapTask : public AbstractGangTask {
 422 private:
 423   ShenandoahRegionIterator _regions;
 424 
 425 public:
 426   ShenandoahResetNextBitmapTask() :
 427     AbstractGangTask("Parallel Reset Bitmap Task") {}
 428 
 429   void work(uint worker_id) {
 430     ShenandoahHeapRegion* region = _regions.next();
 431     ShenandoahHeap* heap = ShenandoahHeap::heap();
 432     ShenandoahMarkingContext* const ctx = heap->next_marking_context();
 433     while (region != NULL) {
 434       if (heap->is_bitmap_slice_committed(region)) {
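        // Marks can only exist below top-at-mark-start; the range above it is expected to be
        // clear already, which the assert below verifies for the whole region.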
 435         HeapWord* bottom = region->bottom();
 436         HeapWord* top = ctx->top_at_mark_start(region->region_number());
 437         if (top > bottom) {
 438           ctx->clear_bitmap(bottom, top);
 439         }
 440         assert(ctx->is_bitmap_clear_range(bottom, region->end()), "must be clear");
 441       }
 442       region = _regions.next();
 443     }
 444   }
 445 };
 446 
 447 void ShenandoahHeap::reset_next_mark_bitmap() {
 448   assert_gc_workers(_workers->active_workers());
 449 
 450   ShenandoahResetNextBitmapTask task;
 451   _workers->run_task(&task);
 452 }
 453 
 454 class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask {
 455 private:
 456   ShenandoahHeapRegionSetIterator& _regions;
 457 
 458 public:
 459   ShenandoahResetNextBitmapTraversalTask(ShenandoahHeapRegionSetIterator& regions) :
 460     AbstractGangTask("Parallel Reset Bitmap Task for Traversal"),
 461     _regions(regions) {}
 462 
 463   void work(uint worker_id) {
 464     ShenandoahHeap* heap = ShenandoahHeap::heap();
 465     ShenandoahHeapRegion* region = _regions.claim_next();
 466     ShenandoahMarkingContext* const next_ctx = heap->next_marking_context();
 467     ShenandoahMarkingContext* const compl_ctx = heap->complete_marking_context();
 468     while (region != NULL) {
 469       assert(!region->is_trash() && !region->is_empty_uncommitted(), "sanity");
 470       assert(heap->is_bitmap_slice_committed(region), "sanity");
 471       HeapWord* bottom = region->bottom();
 472       HeapWord* top = next_ctx->top_at_mark_start(region->region_number());
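      // Transfer the traversal marks into the complete context: copy the marked range into the
      // complete bitmap, set its TAMS to the current top, then clear the next bitmap and reset
      // its TAMS back to bottom.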
 473       if (top > bottom) {
 474         compl_ctx->mark_bit_map()->copy_from(next_ctx->mark_bit_map(), MemRegion(bottom, top));
 475         compl_ctx->set_top_at_mark_start(region->region_number(), top);
 476         next_ctx->clear_bitmap(bottom, top);
 477         next_ctx->set_top_at_mark_start(region->region_number(), bottom);
 478       }
 479       assert(next_ctx->is_bitmap_clear_range(region->bottom(), region->end()),
 480              "need clear next bitmap");
 481       region = _regions.claim_next();
 482     }
 483   }
 484 };
 485 
 486 void ShenandoahHeap::reset_next_mark_bitmap_traversal() {
 487   assert_gc_workers(_workers->active_workers());
 488 
 489   ShenandoahHeapRegionSet* regions = traversal_gc()->traversal_set();
 490   ShenandoahHeapRegionSetIterator iter(regions);
 491   ShenandoahResetNextBitmapTraversalTask task(iter);
 492   _workers->run_task(&task);
 493   assert(next_marking_context()->is_bitmap_clear(), "need clean mark bitmap");
 494 }
 495 
 496 void ShenandoahHeap::print_on(outputStream* st) const {
 497   st->print_cr("Shenandoah Heap");
 498   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 499                capacity() / K, committed() / K, used() / K);
 500   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
 501                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 502 
 503   st->print("Status: ");
 504   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 505   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 506   if (is_evacuation_in_progress())           st->print("evacuating, ");
 507   if (is_update_refs_in_progress())          st->print("updating refs, ");
 508   if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
 509   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 510   if (is_full_gc_in_progress())              st->print("full gc, ");
 511   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 512 
 513   if (cancelled_gc()) {
 514     st->print("cancelled");
 515   } else {
 516     st->print("not cancelled");
 517   }
 518   st->cr();
 519 
 520   st->print_cr("Reserved region:");
 521   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 522                p2i(reserved_region().start()),
 523                p2i(reserved_region().end()));
 524 
 525   st->cr();
 526   MetaspaceUtils::print_on(st);
 527 
 528   if (Verbose) {
 529     print_heap_regions_on(st);
 530   }
 531 }
 532 
 533 class ShenandoahInitGCLABClosure : public ThreadClosure {
 534 public:
 535   void do_thread(Thread* thread) {
 536     if (thread != NULL && (thread->is_Java_thread() || thread->is_Worker_thread())) {
 537       ShenandoahThreadLocalData::initialize_gclab(thread);
 538     }
 539   }
 540 };
 541 
 542 void ShenandoahHeap::post_initialize() {
 543   CollectedHeap::post_initialize();
 544   MutexLocker ml(Threads_lock);
 545 
 546   ShenandoahInitGCLABClosure init_gclabs;
 547   Threads::threads_do(&init_gclabs);
 548   _workers->threads_do(&init_gclabs);
 549 
  // GCLABs cannot be initialized early during VM startup, because their max size cannot be determined yet.
  // Instead, let the WorkGang initialize the GCLAB when a new worker is created.
 552   _workers->set_initialize_gclab();
 553 
 554   _scm->initialize(_max_workers);
 555   _full_gc->initialize(_gc_timer);
 556 
 557   ref_processing_init();
 558 
 559   _heuristics->initialize();
 560 }
 561 
 562 size_t ShenandoahHeap::used() const {
 563   return OrderAccess::load_acquire(&_used);
 564 }
 565 
 566 size_t ShenandoahHeap::committed() const {
 567   OrderAccess::acquire();
 568   return _committed;
 569 }
 570 
 571 void ShenandoahHeap::increase_committed(size_t bytes) {
 572   assert_heaplock_or_safepoint();
 573   _committed += bytes;
 574 }
 575 
 576 void ShenandoahHeap::decrease_committed(size_t bytes) {
 577   assert_heaplock_or_safepoint();
 578   _committed -= bytes;
 579 }
 580 
 581 void ShenandoahHeap::increase_used(size_t bytes) {
 582   Atomic::add(bytes, &_used);
 583 }
 584 
 585 void ShenandoahHeap::set_used(size_t bytes) {
 586   OrderAccess::release_store_fence(&_used, bytes);
 587 }
 588 
 589 void ShenandoahHeap::decrease_used(size_t bytes) {
 590   assert(used() >= bytes, "never decrease heap size by more than we've left");
 591   Atomic::sub(bytes, &_used);
 592 }
 593 
 594 void ShenandoahHeap::increase_allocated(size_t bytes) {
 595   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 596 }
 597 
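// Account for a mutator allocation of "words" words. Wasted words still count toward
// bytes-allocated-since-gc-start and the pacer budget, but not toward heap usage.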
 598 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 599   size_t bytes = words * HeapWordSize;
 600   if (!waste) {
 601     increase_used(bytes);
 602   }
 603   increase_allocated(bytes);
 604   if (ShenandoahPacing) {
 605     control_thread()->pacing_notify_alloc(words);
 606     if (waste) {
 607       pacer()->claim_for_alloc(words, true);
 608     }
 609   }
 610 }
 611 
 612 size_t ShenandoahHeap::capacity() const {
 613   return num_regions() * ShenandoahHeapRegion::region_size_bytes();
 614 }
 615 
 616 bool ShenandoahHeap::is_maximal_no_gc() const {
 617   Unimplemented();
 618   return true;
 619 }
 620 
 621 size_t ShenandoahHeap::max_capacity() const {
 622   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 623 }
 624 
 625 size_t ShenandoahHeap::initial_capacity() const {
 626   return _initial_size;
 627 }
 628 
 629 bool ShenandoahHeap::is_in(const void* p) const {
 630   HeapWord* heap_base = (HeapWord*) base();
 631   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 632   return p >= heap_base && p < last_region_end;
 633 }
 634 
 635 bool ShenandoahHeap::is_scavengable(oop p) {
 636   return true;
 637 }
 638 
 639 void ShenandoahHeap::op_uncommit(double shrink_before) {
 640   assert (ShenandoahUncommit, "should be enabled");
 641 
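  // Walk the regions without holding the heap lock; take the lock only for a candidate region,
  // and re-check its state under the lock before uncommitting it.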
 642   size_t count = 0;
 643   for (size_t i = 0; i < num_regions(); i++) {
 644     ShenandoahHeapRegion* r = get_region(i);
 645     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 646       ShenandoahHeapLocker locker(lock());
 647       if (r->is_empty_committed()) {
 648         r->make_uncommitted();
 649         count++;
 650       }
 651     }
 652     SpinPause(); // allow allocators to take the lock
 653   }
 654 
 655   if (count > 0) {
 656     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 657                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 658     control_thread()->notify_heap_changed();
 659   }
 660 }
 661 
 662 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 663   // New object should fit the GCLAB size
 664   size_t min_size = MAX2(size, PLAB::min_size());
 665 
 666   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 667   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 668   new_size = MIN2(new_size, PLAB::max_size());
 669   new_size = MAX2(new_size, PLAB::min_size());
 670 
 671   // Record new heuristic value even if we take any shortcut. This captures
 672   // the case when moderately-sized objects always take a shortcut. At some point,
 673   // heuristics should catch up with them.
 674   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 675 
 676   if (new_size < size) {
 677     // New size still does not fit the object. Fall back to shared allocation.
 678     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 679     return NULL;
 680   }
 681 
 682   // Retire current GCLAB, and allocate a new one.
 683   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 684   gclab->retire();
 685 
 686   size_t actual_size = 0;
 687   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 688   if (gclab_buf == NULL) {
 689     return NULL;
 690   }
 691 
 692   assert (size <= actual_size, "allocation should fit");
 693 
 694   if (ZeroTLAB) {
 695     // ..and clear it.
 696     Copy::zero_to_words(gclab_buf, actual_size);
 697   } else {
 698     // ...and zap just allocated object.
 699 #ifdef ASSERT
 700     // Skip mangling the space corresponding to the object header to
 701     // ensure that the returned space is not considered parsable by
 702     // any concurrent GC thread.
 703     size_t hdr_size = oopDesc::header_size();
 704     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 705 #endif // ASSERT
 706   }
 707   gclab->set_buf(gclab_buf, actual_size);
 708   return gclab->allocate(size);
 709 }
 710 
 711 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 712                                             size_t requested_size,
 713                                             size_t* actual_size) {
 714   ShenandoahAllocationRequest req = ShenandoahAllocationRequest::for_tlab(min_size, requested_size);
 715   HeapWord* res = allocate_memory(req);
 716   if (res != NULL) {
 717     *actual_size = req.actual_size();
 718   } else {
 719     *actual_size = 0;
 720   }
 721   return res;
 722 }
 723 
 724 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 725                                              size_t word_size,
 726                                              size_t* actual_size) {
 727   ShenandoahAllocationRequest req = ShenandoahAllocationRequest::for_gclab(min_size, word_size);
 728   HeapWord* res = allocate_memory(req);
 729   if (res != NULL) {
 730     *actual_size = req.actual_size();
 731   } else {
 732     *actual_size = 0;
 733   }
 734   return res;
 735 }
 736 
 737 ShenandoahHeap* ShenandoahHeap::heap() {
 738   CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
 740   assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
 741   return (ShenandoahHeap*) heap;
 742 }
 743 
 744 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
 745   CollectedHeap* heap = Universe::heap();
 746   return (ShenandoahHeap*) heap;
 747 }
 748 
 749 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocationRequest& req) {
 750   ShenandoahAllocTrace trace_alloc(req.size(), req.type());
 751 
 752   intptr_t pacer_epoch = 0;
 753   bool in_new_region = false;
 754   HeapWord* result = NULL;
 755 
 756   if (req.is_mutator_alloc()) {
 757     if (ShenandoahPacing) {
 758       pacer()->pace_for_alloc(req.size());
 759       pacer_epoch = pacer()->epoch();
 760     }
 761 
 762     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 763       result = allocate_memory_under_lock(req, in_new_region);
 764     }
 765 
    // If allocation failed, block until the control thread has reacted, then retry the allocation.
    //
    // It might happen that one of the threads requesting allocation unblocks way after the GC
    // happened, only to fail the second allocation, because other threads have already depleted
    // the free storage. In this case, a better strategy is to try again, as long as GC makes
    // progress.
    //
    // Additionally, we need to make sure the allocation was retried after at least one
    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
 775 
 776     size_t tries = 0;
 777 
 778     while (result == NULL && last_gc_made_progress()) {
 779       tries++;
 780       control_thread()->handle_alloc_failure(req.size());
 781       result = allocate_memory_under_lock(req, in_new_region);
 782     }
 783 
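    // Even if GC stopped making progress, keep retrying a bounded number of times, so that the
    // allocation has been retried after at least one Full GC.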
 784     while (result == NULL && tries <= ShenandoahFullGCThreshold) {
 785       tries++;
 786       control_thread()->handle_alloc_failure(req.size());
 787       result = allocate_memory_under_lock(req, in_new_region);
 788     }
 789 
 790   } else {
 791     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 792     result = allocate_memory_under_lock(req, in_new_region);
 793     // Do not call handle_alloc_failure() here, because we cannot block.
 794     // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
 795   }
 796 
 797   if (in_new_region) {
 798     control_thread()->notify_heap_changed();
 799   }
 800 
 801   if (result != NULL) {
 802     size_t requested = req.size();
 803     size_t actual = req.actual_size();
 804 
 805     assert (req.is_lab_alloc() || (requested == actual),
 806             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 807             alloc_type_to_string(req.type()), requested, actual);
 808 
 809     if (req.is_mutator_alloc()) {
 810       notify_mutator_alloc_words(actual, false);
 811 
 812       // If we requested more than we were granted, give the rest back to pacer.
 813       // This only matters if we are in the same pacing epoch: do not try to unpace
 814       // over the budget for the other phase.
 815       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 816         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 817       }
 818     } else {
 819       increase_used(actual*HeapWordSize);
 820     }
 821   }
 822 
 823   return result;
 824 }
 825 
 826 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocationRequest& req, bool& in_new_region) {
 827   ShenandoahHeapLocker locker(lock());
 828   return _free_set->allocate(req, in_new_region);
 829 }
 830 
 831 class ShenandoahObjAllocator : public ObjAllocator {
 832 public:
 833   ShenandoahObjAllocator(Klass* klass, size_t word_size, Thread* thread) :
 834     ObjAllocator(klass, word_size, thread) {}
 835 
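  // Allocate with room for the Brooks forwarding pointer: grow the request by one fwdptr word,
  // shift the object start past it, and initialize the forwarding pointer.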
 836   virtual HeapWord* mem_allocate(Allocation& allocation) {
 837     // Allocate object.
 838     _word_size += BrooksPointer::word_size();
 839     HeapWord* result = ObjAllocator::mem_allocate(allocation);
 840     _word_size -= BrooksPointer::word_size();
 841     // Initialize brooks-pointer
 842     if (result != NULL) {
 843       result += BrooksPointer::word_size();
 844       BrooksPointer::initialize(oop(result));
      assert(! ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targeted region");
 846     }
 847     return result;
 848   }
 849 };
 850 
 851 oop ShenandoahHeap::obj_allocate(Klass* klass, int size, TRAPS) {
 852   ShenandoahObjAllocator allocator(klass, size, THREAD);
 853   return allocator.allocate();
 854 }
 855 
 856 class ShenandoahObjArrayAllocator : public ObjArrayAllocator {
 857 public:
 858   ShenandoahObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero,
 859                               Thread* thread) :
 860     ObjArrayAllocator(klass, word_size, length, do_zero, thread) {}
 861 
 862   virtual HeapWord* mem_allocate(Allocation& allocation) {
 863     // Allocate object.
 864     _word_size += BrooksPointer::word_size();
 865     HeapWord* result = ObjArrayAllocator::mem_allocate(allocation);
 866     _word_size -= BrooksPointer::word_size();
 867     if (result != NULL) {
 868       result += BrooksPointer::word_size();
 869       BrooksPointer::initialize(oop(result));
      assert(! ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targeted region");
 871     }
 872     return result;
 873   }
 874 
 875 };
 876 
 877 oop ShenandoahHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
 878   ShenandoahObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
 879   return allocator.allocate();
 880 }
 881 
 882 class ShenandoahClassAllocator : public ClassAllocator {
 883 public:
 884   ShenandoahClassAllocator(Klass* klass, size_t word_size, Thread* thread) :
 885     ClassAllocator(klass, word_size, thread) {}
 886 
 887   virtual HeapWord* mem_allocate(Allocation& allocation) {
 888     _word_size += BrooksPointer::word_size();
 889     HeapWord* result = ClassAllocator::mem_allocate(allocation);
 890     _word_size -= BrooksPointer::word_size();
 891     if (result != NULL) {
 892       result += BrooksPointer::word_size();
 893       BrooksPointer::initialize(oop(result));
      assert(! ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targeted region");
 895     }
 896     return result;
 897   }
 898 
 899 };
 900 
 901 oop ShenandoahHeap::class_allocate(Klass* klass, int size, TRAPS) {
 902   ShenandoahClassAllocator allocator(klass, size, THREAD);
 903   return allocator.allocate();
 904 }
 905 
 906 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 907                                         bool*  gc_overhead_limit_was_exceeded) {
 908   ShenandoahAllocationRequest req = ShenandoahAllocationRequest::for_shared(size);
 909   return allocate_memory(req);
 910 }
 911 
 912 void ShenandoahHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
 913   HeapWord* obj = tlab_post_allocation_setup(start);
 914   CollectedHeap::fill_with_object(obj, end);
 915 }
 916 
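// Root closure for init-evac: for every root slot pointing into the collection set, evacuate
// the object if it has not been forwarded yet, and update the slot to the to-space copy.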
 917 class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure {
 918 private:
 919   ShenandoahHeap* _heap;
 920   Thread* _thread;
 921 public:
 922   ShenandoahEvacuateUpdateRootsClosure() :
 923     _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 924   }
 925 
 926 private:
 927   template <class T>
 928   void do_oop_work(T* p) {
 929     assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
 930 
 931     T o = RawAccess<>::oop_load(p);
 932     if (! CompressedOops::is_null(o)) {
 933       oop obj = CompressedOops::decode_not_null(o);
 934       if (_heap->in_collection_set(obj)) {
 935         shenandoah_assert_marked_complete(p, obj);
 936         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 937         if (oopDesc::unsafe_equals(resolved, obj)) {
 938           resolved = _heap->evacuate_object(obj, _thread);
 939         }
 940         RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
 941       }
 942     }
 943   }
 944 
 945 public:
 946   void do_oop(oop* p) {
 947     do_oop_work(p);
 948   }
 949   void do_oop(narrowOop* p) {
 950     do_oop_work(p);
 951   }
 952 };
 953 
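// Like the closure above, but only evacuates collection-set objects; it does not update the
// root slot itself.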
 954 class ShenandoahEvacuateRootsClosure: public BasicOopIterateClosure {
 955 private:
 956   ShenandoahHeap* _heap;
 957   Thread* _thread;
 958 public:
 959   ShenandoahEvacuateRootsClosure() :
 960           _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 961   }
 962 
 963 private:
 964   template <class T>
 965   void do_oop_work(T* p) {
 966     T o = RawAccess<>::oop_load(p);
 967     if (! CompressedOops::is_null(o)) {
 968       oop obj = CompressedOops::decode_not_null(o);
 969       if (_heap->in_collection_set(obj)) {
 970         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 971         if (oopDesc::unsafe_equals(resolved, obj)) {
 972           _heap->evacuate_object(obj, _thread);
 973         }
 974       }
 975     }
 976   }
 977 
 978 public:
 979   void do_oop(oop* p) {
 980     do_oop_work(p);
 981   }
 982   void do_oop(narrowOop* p) {
 983     do_oop_work(p);
 984   }
 985 };
 986 
 987 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
 988 private:
 989   ShenandoahHeap* const _heap;
 990   Thread* const _thread;
 991 public:
 992   ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 993     _heap(heap), _thread(Thread::current()) {}
 994 
 995   void do_object(oop p) {
 996     shenandoah_assert_marked_complete(NULL, p);
 997     if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
 998       _heap->evacuate_object(p, _thread);
 999     }
1000   }
1001 };
1002 
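// Parallel evacuation: workers claim collection-set regions, evacuate their live objects,
// report progress to the pacer, and bail out early if the GC has been cancelled.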
1003 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
1004 private:
1005   ShenandoahHeap* const _sh;
1006   ShenandoahCollectionSet* const _cs;
1007 
1008 public:
1009   ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
1010                          ShenandoahCollectionSet* cs) :
1011     AbstractGangTask("Parallel Evacuation Task"),
1012     _sh(sh),
1013     _cs(cs)
1014   {}
1015 
1016   void work(uint worker_id) {
1017     ShenandoahWorkerSession worker_session(worker_id);
1018     ShenandoahEvacOOMScope oom_evac_scope;
1019     SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1020 
1021     ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
1022     ShenandoahHeapRegion* r;
1023     while ((r =_cs->claim_next()) != NULL) {
1024       assert(r->has_live(), "all-garbage regions are reclaimed early");
1025       _sh->marked_object_iterate(r, &cl);
1026 
1027       if (ShenandoahPacing) {
1028         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1029       }
1030 
1031       if (_sh->check_cancelled_gc_and_yield()) {
1032         break;
1033       }
1034     }
1035   }
1036 };
1037 
1038 void ShenandoahHeap::trash_cset_regions() {
1039   ShenandoahHeapLocker locker(lock());
1040 
1041   ShenandoahCollectionSet* set = collection_set();
1042   ShenandoahHeapRegion* r;
1043   set->clear_current_index();
1044   while ((r = set->next()) != NULL) {
1045     r->make_trash();
1046   }
1047   collection_set()->clear();
1048 }
1049 
1050 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1051   st->print_cr("Heap Regions:");
1052   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1053   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1054   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
1055   st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
1056 
1057   for (size_t i = 0; i < num_regions(); i++) {
1058     get_region(i)->print_on(st);
1059   }
1060 }
1061 
1062 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1063   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1064 
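  // The humongous object starts one fwdptr word above the region bottom; include that word
  // when computing how many regions the object spans.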
1065   oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
1066   size_t size = humongous_obj->size() + BrooksPointer::word_size();
1067   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1068   size_t index = start->region_number() + required_regions - 1;
1069 
1070   assert(!start->has_live(), "liveness must be zero");
1071 
1072   for(size_t i = 0; i < required_regions; i++) {
1073     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1074     // as it expects that every region belongs to a humongous region starting with a humongous start region.
1075     ShenandoahHeapRegion* region = get_region(index --);
1076 
1077     assert(region->is_humongous(), "expect correct humongous start or continuation");
1078     assert(!in_collection_set(region), "Humongous region should not be in collection set");
1079 
1080     region->make_trash();
1081   }
1082 }
1083 
1084 #ifdef ASSERT
1085 class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
1086   bool heap_region_do(ShenandoahHeapRegion* r) {
1087     assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
1088     return false;
1089   }
1090 };
1091 #endif
1092 
1093 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
1094   if (!cancelled_gc()) {
1095     make_parsable(true);
1096 
1097     if (ShenandoahVerify) {
1098       verifier()->verify_after_concmark();
1099     }
1100 
1101     trash_cset_regions();
1102 
1103     // NOTE: This needs to be done during a stop the world pause, because
1104     // putting regions into the collection set concurrently with Java threads
1105     // will create a race. In particular, acmp could fail because when we
1106     // resolve the first operand, the containing region might not yet be in
1107     // the collection set, and thus return the original oop. When the 2nd
1108     // operand gets resolved, the region could be in the collection set
1109     // and the oop gets evacuated. If both operands have originally been
1110     // the same, we get false negatives.
1111 
1112     {
1113       ShenandoahHeapLocker locker(lock());
1114       _collection_set->clear();
1115       _free_set->clear();
1116 
1117 #ifdef ASSERT
1118       ShenandoahCheckCollectionSetClosure ccsc;
1119       heap_region_iterate(&ccsc);
1120 #endif
1121 
1122       heuristics()->choose_collection_set(_collection_set);
1123 
1124       _free_set->rebuild();
1125     }
1126 
1127     if (ShenandoahVerify) {
1128       verifier()->verify_before_evacuation();
1129     }
1130   }
1131 }
1132 
1133 
1134 class ShenandoahRetireGCLABClosure : public ThreadClosure {
1135 public:
1136   void do_thread(Thread* thread) {
1137     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1138     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1139     gclab->retire();
1140   }
1141 };
1142 
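// Retire TLABs of all Java threads and GCLABs of all Java and worker threads, so that the
// heap is walkable by subsequent linear scans (see e.g. prepare_for_verify()).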
1143 void ShenandoahHeap::make_parsable(bool retire_tlabs) {
1144   if (UseTLAB) {
1145     CollectedHeap::ensure_parsability(retire_tlabs);
1146   }
1147   ShenandoahRetireGCLABClosure cl;
1148   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1149     cl.do_thread(t);
1150   }
1151   workers()->threads_do(&cl);
1152 }
1153 
1154 void ShenandoahHeap::resize_tlabs() {
1155   CollectedHeap::resize_all_tlabs();
1156 }
1157 
1158 void ShenandoahHeap::accumulate_statistics_tlabs() {
1159   CollectedHeap::accumulate_statistics_all_tlabs();
1160 }
1161 
1162 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1163   ShenandoahRootEvacuator* _rp;
1164 public:
1165 
1166   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1167     AbstractGangTask("Shenandoah evacuate and update roots"),
1168     _rp(rp)
1169   {
1170     // Nothing else to do.
1171   }
1172 
1173   void work(uint worker_id) {
1174     ShenandoahWorkerSession worker_session(worker_id);
1175     ShenandoahEvacOOMScope oom_evac_scope;
1176     ShenandoahEvacuateUpdateRootsClosure cl;
1177 
1178     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1179     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1180   }
1181 };
1182 
1183 class ShenandoahFixRootsTask : public AbstractGangTask {
1184   ShenandoahRootEvacuator* _rp;
1185 public:
1186 
1187   ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
1188     AbstractGangTask("Shenandoah update roots"),
1189     _rp(rp)
1190   {
1191     // Nothing else to do.
1192   }
1193 
1194   void work(uint worker_id) {
1195     ShenandoahWorkerSession worker_session(worker_id);
1196     ShenandoahEvacOOMScope oom_evac_scope;
1197     ShenandoahUpdateRefsClosure cl;
1198     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1199 
1200     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1201   }
1202 };
1203 
1204 void ShenandoahHeap::evacuate_and_update_roots() {
1205 
1206 #if defined(COMPILER2) || INCLUDE_JVMCI
1207   DerivedPointerTable::clear();
1208 #endif
1209   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1210 
1211   {
1212     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1213     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1214     workers()->run_task(&roots_task);
1215   }
1216 
1217 #if defined(COMPILER2) || INCLUDE_JVMCI
1218   DerivedPointerTable::update_pointers();
1219 #endif
1220   if (cancelled_gc()) {
1221     fixup_roots();
1222   }
1223 }
1224 
1225 void ShenandoahHeap::fixup_roots() {
1226     assert(cancelled_gc(), "Only after concurrent cycle failed");
1227 
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, so evacuation fails and leaves a from-space pointer
    // to object X in a root; GC thread 2 then evacuates the same object X to to-space, which
    // leaves a truly dangling from-space reference in the first root oop*. This must not happen.
    // Note that clear() and update_pointers() must always be called in pairs, and cannot
    // nest with the clear()/update_pointers() above.
1235 #if defined(COMPILER2) || INCLUDE_JVMCI
1236     DerivedPointerTable::clear();
1237 #endif
1238     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1239     ShenandoahFixRootsTask update_roots_task(&rp);
1240     workers()->run_task(&update_roots_task);
1241 #if defined(COMPILER2) || INCLUDE_JVMCI
1242     DerivedPointerTable::update_pointers();
1243 #endif
1244 }
1245 
1246 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1247   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1248 
1249   CodeBlobToOopClosure blobsCl(cl, false);
1250   CLDToOopClosure cldCl(cl);
1251 
1252   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1253   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
1254 }
1255 
1256 bool ShenandoahHeap::supports_tlab_allocation() const {
1257   return true;
1258 }
1259 
1260 // Returns size in bytes
1261 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1262   if (ShenandoahElasticTLAB) {
1263     // With Elastic TLABs, return the max allowed size, and let the allocation path
1264     // figure out the safe size for current allocation.
1265     return ShenandoahHeapRegion::max_tlab_size_bytes();
1266   } else {
1267     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1268   }
1269 }
1270 
1271 size_t ShenandoahHeap::max_tlab_size() const {
1272   // Returns size in words
1273   return ShenandoahHeapRegion::max_tlab_size_words();
1274 }
1275 
1276 class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
1277 public:
1278   void do_thread(Thread* thread) {
1279     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1280     gclab->retire();
1281     if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1282       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1283     }
1284   }
1285 };
1286 
1287 void ShenandoahHeap::retire_and_reset_gclabs() {
1288   ShenandoahRetireAndResetGCLABClosure cl;
1289   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1290     cl.do_thread(t);
1291   }
1292   workers()->threads_do(&cl);
1293 }
1294 
1295 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1296   return true;
1297 }
1298 
1299 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1300   // Overridden to do nothing.
1301   return new_obj;
1302 }
1303 
1304 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1305   return true;
1306 }
1307 
1308 bool ShenandoahHeap::card_mark_must_follow_store() const {
1309   return false;
1310 }
1311 
1312 void ShenandoahHeap::collect(GCCause::Cause cause) {
1313   control_thread()->handle_explicit_gc(cause);
1314 }
1315 
1316 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1317   //assert(false, "Shouldn't need to do full collections");
1318 }
1319 
1320 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1321   Unimplemented();
1322   return NULL;
1323 
1324 }
1325 
1326 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1327   return _shenandoah_policy;
1328 }
1329 
1330 
1331 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1332   Space* sp = heap_region_containing(addr);
1333   if (sp != NULL) {
1334     return sp->block_start(addr);
1335   }
1336   return NULL;
1337 }
1338 
1339 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1340   Space* sp = heap_region_containing(addr);
1341   assert(sp != NULL, "block_size of address outside of heap");
1342   return sp->block_size(addr);
1343 }
1344 
1345 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1346   Space* sp = heap_region_containing(addr);
1347   return sp->block_is_obj(addr);
1348 }
1349 
1350 jlong ShenandoahHeap::millis_since_last_gc() {
1351   return 0;
1352 }
1353 
1354 void ShenandoahHeap::prepare_for_verify() {
1355   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1356     make_parsable(false);
1357   }
1358 }
1359 
1360 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1361   workers()->print_worker_threads_on(st);
1362   if (ShenandoahStringDedup::is_enabled()) {
1363     ShenandoahStringDedup::print_worker_threads_on(st);
1364   }
1365 }
1366 
1367 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1368   workers()->threads_do(tcl);
1369   if (ShenandoahStringDedup::is_enabled()) {
1370     ShenandoahStringDedup::threads_do(tcl);
1371   }
1372 }
1373 
1374 void ShenandoahHeap::print_tracing_info() const {
1375   LogTarget(Info, gc, stats) lt;
1376   if (lt.is_enabled()) {
1377     ResourceMark rm;
1378     LogStream ls(lt);
1379 
1380     phase_timings()->print_on(&ls);
1381 
1382     ls.cr();
1383     ls.cr();
1384 
1385     shenandoahPolicy()->print_gc_stats(&ls);
1386 
1387     ls.cr();
1388     ls.cr();
1389 
1390     if (ShenandoahPacing) {
1391       pacer()->print_on(&ls);
1392     }
1393 
1394     ls.cr();
1395     ls.cr();
1396 
1397     if (ShenandoahAllocationTrace) {
1398       assert(alloc_tracker() != NULL, "Must be");
1399       alloc_tracker()->print_on(&ls);
1400     } else {
1401       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1402     }
1403   }
1404 }
1405 
1406 void ShenandoahHeap::verify(VerifyOption vo) {
1407   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1408     if (ShenandoahVerify) {
1409       verifier()->verify_generic(vo);
1410     } else {
1411       // TODO: Consider allocating verification bitmaps on demand,
1412       // and turn this on unconditionally.
1413     }
1414   }
1415 }
1416 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1417   return _free_set->capacity();
1418 }
1419 
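// Closure used by object_iterate(): resolves forwarding, marks each newly discovered object in
// the aux bitmap, and pushes it on the stack for later traversal.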
1420 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1421 private:
1422   MarkBitMap* _bitmap;
1423   Stack<oop,mtGC>* _oop_stack;
1424 
1425   template <class T>
1426   void do_oop_work(T* p) {
1427     T o = RawAccess<>::oop_load(p);
1428     if (!CompressedOops::is_null(o)) {
1429       oop obj = CompressedOops::decode_not_null(o);
1430       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1431       assert(oopDesc::is_oop(obj), "must be a valid oop");
1432       if (!_bitmap->is_marked((HeapWord*) obj)) {
1433         _bitmap->mark((HeapWord*) obj);
1434         _oop_stack->push(obj);
1435       }
1436     }
1437   }
1438 public:
1439   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1440     _bitmap(bitmap), _oop_stack(oop_stack) {}
1441   void do_oop(oop* p)       { do_oop_work(p); }
1442   void do_oop(narrowOop* p) { do_oop_work(p); }
1443 };
1444 
1445 /*
1446  * This is public API, used in preparation for object_iterate().
1447  * Since we don't do a linear scan of the heap in object_iterate() (see the comment below),
1448  * we don't need to make the heap parsable. For Shenandoah-internal linear heap scans that
1449  * we can control, we call SH::make_tlabs_parsable().
1450  */
1451 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1452   // No-op.
1453 }
1454 
1455 /*
1456  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1457  *
1458  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1459  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1460  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1461  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1462  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1463  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1464  * wiped the bitmap in preparation for next marking).
1465  *
1466  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1467  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1468  * is allowed to report dead objects, but is not required to do so.
1469  */
1470 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1471   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1472   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1473     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1474     return;
1475   }
1476 
1477   // Reset bitmap
1478   MemRegion mr = _aux_bit_map.covered();
1479   _aux_bit_map.clear_range_large(mr);
1480 
1481   Stack<oop,mtGC> oop_stack;
1482 
1483   // First, we process all GC roots. This populates the work stack with initial objects.
1484   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1485   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1486   CLDToOopClosure clds(&oops, false);
1487   CodeBlobToOopClosure blobs(&oops, false);
1488   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1489 
1490   // Work through the oop stack to traverse heap.
1491   while (! oop_stack.is_empty()) {
1492     oop obj = oop_stack.pop();
1493     assert(oopDesc::is_oop(obj), "must be a valid oop");
1494     cl->do_object(obj);
1495     obj->oop_iterate(&oops);
1496   }
1497 
1498   assert(oop_stack.is_empty(), "should be empty");
1499 
1500   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1501     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1502   }
1503 }
1504 
1505 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1506   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1507   object_iterate(cl);
1508 }
1509 
1510 // Apply blk->heap_region_do() to all regions in address order (optionally skipping cset
1511 // regions and humongous continuations), terminating the iteration early if heap_region_do() returns true.
1512 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1513   for (size_t i = 0; i < num_regions(); i++) {
1514     ShenandoahHeapRegion* current  = get_region(i);
1515     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1516       continue;
1517     }
1518     if (skip_cset_regions && in_collection_set(current)) {
1519       continue;
1520     }
1521     if (blk->heap_region_do(current)) {
1522       return;
1523     }
1524   }
1525 }
1526 
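// Region closure for init-mark: clears per-region liveness data and records the current
// top as top-at-mark-start (TAMS) in the next marking context.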
1527 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1528 private:
1529   ShenandoahHeap* sh;
1530 public:
1531   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1532 
1533   bool heap_region_do(ShenandoahHeapRegion* r) {
1534     r->clear_live_data();
1535     sh->next_marking_context()->set_top_at_mark_start(r->region_number(), r->top());
1536     return false;
1537   }
1538 };
1539 
1540 void ShenandoahHeap::op_init_mark() {
1541   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1542 
1543   assert(next_marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1544 
1545   if (ShenandoahVerify) {
1546     verifier()->verify_before_concmark();
1547   }
1548 
1549   {
1550     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1551     accumulate_statistics_tlabs();
1552   }
1553 
1554   set_concurrent_mark_in_progress(true);
1555   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1556   {
1557     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1558     make_parsable(true);
1559   }
1560 
1561   {
1562     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1563     ShenandoahClearLivenessClosure clc(this);
1564     heap_region_iterate(&clc);
1565   }
1566 
1567   // Make above changes visible to worker threads
1568   OrderAccess::fence();
1569 
1570   concurrentMark()->init_mark_roots();
1571 
1572   if (UseTLAB) {
1573     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1574     resize_tlabs();
1575   }
1576 
1577   if (ShenandoahPacing) {
1578     pacer()->setup_for_mark();
1579   }
1580 }
1581 
1582 void ShenandoahHeap::op_mark() {
1583   concurrentMark()->mark_from_roots();
1584 }
1585 
1586 void ShenandoahHeap::op_final_mark() {
1587   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1588 
1589   // It is critical that we evacuate roots right after finishing marking,
1590   // so that we don't get unmarked objects in the roots.
1592 
1593   if (!cancelled_gc()) {
1594     concurrentMark()->finish_mark_from_roots();
1595     stop_concurrent_marking();
1596 
1597     {
1598       ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
1599 
1600       // All allocations past TAMS are implicitly live, adjust the region data.
1601       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1602       for (size_t i = 0; i < num_regions(); i++) {
1603         ShenandoahHeapRegion* r = get_region(i);
1604         if (!r->is_active()) continue;
1605 
1606         HeapWord* tams = complete_marking_context()->top_at_mark_start(r->region_number());
1607         HeapWord* top = r->top();
1608         if (top > tams) {
1609           r->increase_live_data_alloc_words(pointer_delta(top, tams));
1610         }
1611       }
1612     }
1613 
1614     {
1615       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1616       prepare_for_concurrent_evacuation();
1617     }
1618 
1619     // If collection set has candidates, start evacuation.
1620     // Otherwise, bypass the rest of the cycle.
1621     if (!collection_set()->is_empty()) {
1622       set_evacuation_in_progress(true);
1623       // From here on, we need to update references.
1624       set_has_forwarded_objects(true);
1625 
1626       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1627       evacuate_and_update_roots();
1628     }
1629 
1630     if (ShenandoahPacing) {
1631       pacer()->setup_for_evac();
1632     }
1633   } else {
1634     concurrentMark()->cancel();
1635     stop_concurrent_marking();
1636 
1637     if (process_references()) {
1638       // Abandon reference processing right away: pre-cleaning must have failed.
1639       ReferenceProcessor *rp = ref_processor();
1640       rp->disable_discovery();
1641       rp->abandon_partial_discovery();
1642       rp->verify_no_references_recorded();
1643     }
1644   }
1645 }
1646 
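// Final Evac pause: turns off the evacuation flag, retires and resets GCLABs, and
// verifies the heap if ShenandoahVerify is enabled.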
1647 void ShenandoahHeap::op_final_evac() {
1648   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1649 
1650   set_evacuation_in_progress(false);
1651 
1652   retire_and_reset_gclabs();
1653 
1654   if (ShenandoahVerify) {
1655     verifier()->verify_after_evacuation();
1656   }
1657 }
1658 
1659 void ShenandoahHeap::op_evac() {
1660   ShenandoahParallelEvacuationTask task(this, _collection_set);
1661   workers()->run_task(&task);
1662 }
1663 
1664 void ShenandoahHeap::op_updaterefs() {
1665   update_heap_references(true);
1666 }
1667 
1668 void ShenandoahHeap::op_cleanup() {
1669   ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
1670   free_set()->recycle_trash();
1671 }
1672 
1673 void ShenandoahHeap::op_cleanup_bitmaps() {
1674   op_cleanup();
1675 
1676   ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1677   reset_next_mark_bitmap();
1678 }
1679 
1680 void ShenandoahHeap::op_cleanup_traversal() {
1681   op_cleanup();
1682 
1683   ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1684   reset_next_mark_bitmap_traversal();
1685 }
1686 
1687 void ShenandoahHeap::op_preclean() {
1688   concurrentMark()->preclean_weak_refs();
1689 }
1690 
1691 void ShenandoahHeap::op_init_traversal() {
1692   traversal_gc()->init_traversal_collection();
1693 }
1694 
1695 void ShenandoahHeap::op_traversal() {
1696   traversal_gc()->concurrent_traversal_collection();
1697 }
1698 
1699 void ShenandoahHeap::op_final_traversal() {
1700   traversal_gc()->final_traversal_collection();
1701 }
1702 
1703 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1704   ShenandoahMetricsSnapshot metrics;
1705   metrics.snap_before();
1706 
1707   full_gc()->do_it(cause);
1708   if (UseTLAB) {
1709     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1710     resize_all_tlabs();
1711   }
1712 
1713   metrics.snap_after();
1714   metrics.print();
1715 
1716   if (metrics.is_good_progress("Full GC")) {
1717     _progress_last_gc.set();
1718   } else {
1719     // Nothing to do. Tell the allocation path that we have failed to make
1720     // progress, and it can finally fail.
1721     _progress_last_gc.unset();
1722   }
1723 }
1724 
1725 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1726   // Degenerated GC is STW, but it can also fail. Current mechanics communicate
1727   // GC failure via the cancelled_gc() flag. So, if we detect the failure after
1728   // some phase, we have to upgrade the Degenerated GC to Full GC.
1729 
1730   clear_cancelled_gc();
1731 
1732   ShenandoahMetricsSnapshot metrics;
1733   metrics.snap_before();
1734 
1735   switch (point) {
1736     case _degenerated_traversal:
1737       {
1738         // Drop the collection set. Note: this leaves some already forwarded objects
1739         // behind, which may be problematic, see comments for ShenandoahEvacAssist
1740         // workarounds in ShenandoahTraversalHeuristics.
1741 
1742         ShenandoahHeapLocker locker(lock());
1743         collection_set()->clear_current_index();
1744         for (size_t i = 0; i < collection_set()->count(); i++) {
1745           ShenandoahHeapRegion* r = collection_set()->next();
1746           r->make_regular_bypass();
1747         }
1748         collection_set()->clear();
1749       }
1750       op_final_traversal();
1751       op_cleanup_traversal();
1752       return;
1753 
1754     // The cases below form a Duff's-device-like structure: they describe the actual GC
1755     // cycle, but enter it at different points, depending on which concurrent phase had
1756     // degenerated.
1757 
1758     case _degenerated_outside_cycle:
1759       // We have degenerated from outside the cycle, which means something is bad with
1760       // the heap, most probably heavy humongous fragmentation, or we are very low on free
1761       // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1762       // we can do the most aggressive degen cycle, which includes processing references and
1763       // class unloading, unless those features are explicitly disabled.
1764       //
1765       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1766       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1767       set_process_references(ShenandoahRefProcFrequency != 0);
1768       set_unload_classes(ClassUnloading);
1769 
1770       if (heuristics()->can_do_traversal_gc()) {
1771         // Not possible to degenerate from here, upgrade to Full GC right away.
1772         cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1773         op_degenerated_fail();
1774         return;
1775       }
1776       op_init_mark();
1777       if (cancelled_gc()) {
1778         op_degenerated_fail();
1779         return;
1780       }
1781 
1782     case _degenerated_mark:
1783       op_final_mark();
1784       if (cancelled_gc()) {
1785         op_degenerated_fail();
1786         return;
1787       }
1788 
1789       op_cleanup();
1790 
1791     case _degenerated_evac:
1792       // If heuristics thinks we should do the cycle, this flag would be set,
1793       // and we can do evacuation. Otherwise, it would be the shortcut cycle.
1794       if (is_evacuation_in_progress()) {
1795 
1796         // Degeneration under oom-evac protocol might have left some objects in
1797         // collection set un-evacuated. Restart evacuation from the beginning to
1798         // capture all objects. For all the objects that are already evacuated,
1799         // it would be a simple check, which is supposed to be fast. This is also
1800         // safe to do even without degeneration, as CSet iterator is at beginning
1801         // in preparation for evacuation anyway.
1802         collection_set()->clear_current_index();
1803 
1804         op_evac();
1805         if (cancelled_gc()) {
1806           op_degenerated_fail();
1807           return;
1808         }
1809       }
1810 
1811       // If heuristics thinks we should do the cycle, this flag would be set,
1812       // and we need to do update-refs. Otherwise, it would be the shortcut cycle.
1813       if (has_forwarded_objects()) {
1814         op_init_updaterefs();
1815         if (cancelled_gc()) {
1816           op_degenerated_fail();
1817           return;
1818         }
1819       }
1820 
1821     case _degenerated_updaterefs:
1822       if (has_forwarded_objects()) {
1823         op_final_updaterefs();
1824         if (cancelled_gc()) {
1825           op_degenerated_fail();
1826           return;
1827         }
1828       }
1829 
1830       op_cleanup_bitmaps();
1831       break;
1832 
1833     default:
1834       ShouldNotReachHere();
1835   }
1836 
1837   if (ShenandoahVerify) {
1838     verifier()->verify_after_degenerated();
1839   }
1840 
1841   metrics.snap_after();
1842   metrics.print();
1843 
1844   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1845   // because that probably means the heap is overloaded and/or fragmented.
1846   if (!metrics.is_good_progress("Degenerated GC")) {
1847     _progress_last_gc.unset();
1848     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1849     op_degenerated_futile();
1850   } else {
1851     _progress_last_gc.set();
1852   }
1853 }
1854 
1855 void ShenandoahHeap::op_degenerated_fail() {
1856   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1857   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1858   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1859 }
1860 
1861 void ShenandoahHeap::op_degenerated_futile() {
1862   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1863   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1864 }
1865 
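// Swap the roles of the two marking contexts: the bitmap just populated by marking
// becomes the "complete" context consulted by evacuation and update-refs, and the old
// complete bitmap is reused as the "next" context for the following cycle.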
1866 void ShenandoahHeap::swap_mark_contexts() {
1867   ShenandoahMarkingContext* tmp = _complete_marking_context;
1868   _complete_marking_context = _next_marking_context;
1869   _next_marking_context = tmp;
1870 }
1871 
1872 
1873 void ShenandoahHeap::stop_concurrent_marking() {
1874   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
1875   if (!cancelled_gc()) {
1876     // Marking finished without cancellation: any reference updating piggybacked on
1877     // marking is complete as well, so clear the flag and swap marking contexts.
1878     set_has_forwarded_objects(false);
1879     swap_mark_contexts();
1880   }
1881   set_concurrent_mark_in_progress(false);
1882 }
1883 
1884 void ShenandoahHeap::force_satb_flush_all_threads() {
1885   if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
1886     // No need to flush SATBs
1887     return;
1888   }
1889 
1890   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1891     ShenandoahThreadLocalData::set_force_satb_flush(t, true);
1892   }
1893   // The threads are not "acquiring" their thread-local data, but it does not
1894   // hurt to "release" the updates here anyway.
1895   OrderAccess::fence();
1896 }
1897 
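// The global GC state is mirrored into a per-thread copy (ShenandoahThreadLocalData),
// presumably so barrier code can test it without touching the global flag. The helpers
// below update the global state and republish it to all Java threads.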
1898 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1899   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1900     ShenandoahThreadLocalData::set_gc_state(t, state);
1901   }
1902 }
1903 
1904 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1905   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1906   _gc_state.set_cond(mask, value);
1907   set_gc_state_all_threads(_gc_state.raw_value());
1908 }
1909 
1910 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1911   set_gc_state_mask(MARKING, in_progress);
1912   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1913 }
1914 
1915 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1916    set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
1917    ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1918 }
1919 
1920 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1921   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1922   set_gc_state_mask(EVACUATION, in_progress);
1923 }
1924 
1925 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1926   // Initialize Brooks pointer for the next object
1927   HeapWord* result = obj + BrooksPointer::word_size();
1928   BrooksPointer::initialize(oop(result));
1929   return result;
1930 }
1931 
1932 uint ShenandoahHeap::oop_extra_words() {
1933   return BrooksPointer::word_size();
1934 }
1935 
1936 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1937   _mark_context(ShenandoahHeap::heap()->next_marking_context()) {
1938 }
1939 
1940 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1941   _mark_context(ShenandoahHeap::heap()->next_marking_context()) {
1942 }
1943 
1944 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1945   if (CompressedOops::is_null(obj)) {
1946     return false;
1947   }
1948   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1949   shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress() || ShenandoahHeap::heap()->is_concurrent_traversal_in_progress());
1950   return _mark_context->is_marked(obj);
1951 }
1952 
1953 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1954   if (CompressedOops::is_null(obj)) {
1955     return false;
1956   }
1957   shenandoah_assert_not_forwarded(NULL, obj);
1958   return _mark_context->is_marked(obj);
1959 }
1960 
1961 void ShenandoahHeap::ref_processing_init() {
1962   MemRegion mr = reserved_region();
1963 
1964   assert(_max_workers > 0, "Sanity");
1965 
1966   _ref_processor =
1967     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
1968                            true,                    // MT processing
1969                            ParallelRefProcEnabled ? _max_workers : 1, // Degree of MT processing
1970                            true,                    // MT discovery
1971                            _max_workers,            // Degree of MT discovery
1972                            false,                   // Reference discovery is not atomic
1973                            NULL,                    // No closure, should be installed before use
1974                            true);                   // Scale worker threads
1975 
1976   shenandoah_assert_rp_isalive_not_installed();
1977 }
1978 
1979 
1980 GCTracer* ShenandoahHeap::tracer() {
1981   return shenandoahPolicy()->tracer();
1982 }
1983 
1984 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1985   return _free_set->used();
1986 }
1987 
1988 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1989   if (try_cancel_gc()) {
1990     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1991     log_info(gc)("%s", msg.buffer());
1992     Events::log(Thread::current(), "%s", msg.buffer());
1993   }
1994 }
1995 
1996 uint ShenandoahHeap::max_workers() {
1997   return _max_workers;
1998 }
1999 
2000 void ShenandoahHeap::stop() {
2001   // The shutdown sequence should be able to terminate when GC is running.
2002 
2003   // Step 0. Notify policy to disable event recording.
2004   _shenandoah_policy->record_shutdown();
2005 
2006   // Step 1. Notify control thread that we are in shutdown.
2007   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2008   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2009   control_thread()->prepare_for_graceful_shutdown();
2010 
2011   // Step 2. Notify GC workers that we are cancelling GC.
2012   cancel_gc(GCCause::_shenandoah_stop_vm);
2013 
2014   // Step 3. Wait until GC worker exits normally.
2015   control_thread()->stop();
2016 
2017   // Step 4. Stop String Dedup thread if it is active
2018   if (ShenandoahStringDedup::is_enabled()) {
2019     ShenandoahStringDedup::stop();
2020   }
2021 }
2022 
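// Unloads classes and cleans up metadata: picks full-GC or concurrent phase timing ids
// depending on the caller, then runs SystemDictionary unloading, a parallel cleaning
// task, optional string dedup cleanup, and finally ClassLoaderDataGraph::purge().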
2023 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
2024   assert(ClassUnloading || full_gc, "Class unloading should be enabled");
2025 
2026   ShenandoahPhaseTimings::Phase phase_root =
2027           full_gc ?
2028           ShenandoahPhaseTimings::full_gc_purge :
2029           ShenandoahPhaseTimings::purge;
2030 
2031   ShenandoahPhaseTimings::Phase phase_unload =
2032           full_gc ?
2033           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2034           ShenandoahPhaseTimings::purge_class_unload;
2035 
2036   ShenandoahPhaseTimings::Phase phase_cldg =
2037           full_gc ?
2038           ShenandoahPhaseTimings::full_gc_purge_cldg :
2039           ShenandoahPhaseTimings::purge_cldg;
2040 
2041   ShenandoahPhaseTimings::Phase phase_par =
2042           full_gc ?
2043           ShenandoahPhaseTimings::full_gc_purge_par :
2044           ShenandoahPhaseTimings::purge_par;
2045 
2046   ShenandoahPhaseTimings::Phase phase_par_classes =
2047           full_gc ?
2048           ShenandoahPhaseTimings::full_gc_purge_par_classes :
2049           ShenandoahPhaseTimings::purge_par_classes;
2050 
2051   ShenandoahPhaseTimings::Phase phase_par_codecache =
2052           full_gc ?
2053           ShenandoahPhaseTimings::full_gc_purge_par_codecache :
2054           ShenandoahPhaseTimings::purge_par_codecache;
2055 
2056   ShenandoahPhaseTimings::Phase phase_par_symbstring =
2057           full_gc ?
2058           ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
2059           ShenandoahPhaseTimings::purge_par_symbstring;
2060 
2061   ShenandoahPhaseTimings::Phase phase_par_sync =
2062           full_gc ?
2063           ShenandoahPhaseTimings::full_gc_purge_par_sync :
2064           ShenandoahPhaseTimings::purge_par_sync;
2065 
2066   ShenandoahGCPhase root_phase(phase_root);
2067 
2068   ShenandoahIsAliveSelector alive;
2069   BoolObjectClosure* is_alive = alive.is_alive_closure();
2070 
2071   bool purged_class;
2072 
2073   // Unload classes and purge SystemDictionary.
2074   {
2075     ShenandoahGCPhase phase(phase_unload);
2076     purged_class = SystemDictionary::do_unloading(gc_timer(),
2077                                                   full_gc /* do_cleaning*/ );
2078   }
2079 
2080   {
2081     ShenandoahGCPhase phase(phase_par);
2082     uint active = _workers->active_workers();
2083     StringDedupUnlinkOrOopsDoClosure dedup_cl(is_alive, NULL);
2084     ParallelCleaningTask unlink_task(is_alive, &dedup_cl, active, purged_class);
2085     _workers->run_task(&unlink_task);
2086 
2087     /*
2088     ShenandoahPhaseTimings* p = phase_timings();
2089     ParallelCleaningTimes times = unlink_task.times();
2090 
2091     // "times" report total time, phase_tables_cc reports wall time. Divide total times
2092     // by active workers to get average time per worker, that would add up to wall time.
2093     p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
2094     p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
2095     p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
2096     p->record_phase_time(phase_par_sync,       times.sync_us() / active);
2097     */
2098   }
2099 
2100   if (ShenandoahStringDedup::is_enabled()) {
2101     ShenandoahPhaseTimings::Phase phase_purge_dedup =
2102             full_gc ?
2103             ShenandoahPhaseTimings::full_gc_purge_string_dedup :
2104             ShenandoahPhaseTimings::purge_string_dedup;
2105     ShenandoahGCPhase phase(phase_purge_dedup);
2106     ShenandoahStringDedup::parallel_cleanup();
2107   }
2108 
2109   {
2110     ShenandoahGCPhase phase(phase_cldg);
2111     ClassLoaderDataGraph::purge();
2112   }
2113 }
2114 
2115 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2116   set_gc_state_mask(HAS_FORWARDED, cond);
2117 }
2118 
2119 bool ShenandoahHeap::last_gc_made_progress() const {
2120   return _progress_last_gc.is_set();
2121 }
2122 
2123 void ShenandoahHeap::set_process_references(bool pr) {
2124   _process_references.set_cond(pr);
2125 }
2126 
2127 void ShenandoahHeap::set_unload_classes(bool uc) {
2128   _unload_classes.set_cond(uc);
2129 }
2130 
2131 bool ShenandoahHeap::process_references() const {
2132   return _process_references.is_set();
2133 }
2134 
2135 bool ShenandoahHeap::unload_classes() const {
2136   return _unload_classes.is_set();
2137 }
2138 
2139 // FIXME: This should be in ShenandoahHeapRegionSet.
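// Finds the next non-humongous region after r, in address order.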
2140 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2141   size_t region_idx = r->region_number() + 1;
2142   ShenandoahHeapRegion* next = get_region(region_idx);
2143   guarantee(next->region_number() == region_idx, "region number must match");
2144   while (next->is_humongous()) {
2145     region_idx = next->region_number() + 1;
2146     next = get_region(region_idx);
2147     guarantee(next->region_number() == region_idx, "region number must match");
2148   }
2149   return next;
2150 }
2151 
2152 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2153   return _monitoring_support;
2154 }
2155 
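// The next few accessors export raw addresses of internal GC data (the biased cset map,
// the cancellation flag, the gc state byte); judging by the names, these are meant for
// fast-path tests, e.g. from generated code.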
2156 address ShenandoahHeap::in_cset_fast_test_addr() {
2157   ShenandoahHeap* heap = ShenandoahHeap::heap();
2158   assert(heap->collection_set() != NULL, "Sanity");
2159   return (address) heap->collection_set()->biased_map_address();
2160 }
2161 
2162 address ShenandoahHeap::cancelled_gc_addr() {
2163   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
2164 }
2165 
2166 address ShenandoahHeap::gc_state_addr() {
2167   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2168 }
2169 
2170 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2171   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2172 }
2173 
2174 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2175   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2176 }
2177 
2178 ShenandoahPacer* ShenandoahHeap::pacer() const {
2179   assert (_pacer != NULL, "sanity");
2180   return _pacer;
2181 }
2182 
2183 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2184   _degenerated_gc_in_progress.set_cond(in_progress);
2185 }
2186 
2187 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2188   _full_gc_in_progress.set_cond(in_progress);
2189 }
2190 
2191 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2192   assert (is_full_gc_in_progress(), "should be");
2193   _full_gc_move_in_progress.set_cond(in_progress);
2194 }
2195 
2196 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2197   set_gc_state_mask(UPDATEREFS, in_progress);
2198 }
2199 
2200 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2201   ShenandoahCodeRoots::add_nmethod(nm);
2202 }
2203 
2204 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2205   ShenandoahCodeRoots::remove_nmethod(nm);
2206 }
2207 
2208 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2209   o = ShenandoahBarrierSet::barrier_set()->write_barrier(o);
2210   ShenandoahHeapLocker locker(lock());
2211   heap_region_containing(o)->make_pinned();
2212   return o;
2213 }
2214 
2215 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2216   o = ShenandoahBarrierSet::barrier_set()->read_barrier(o);
2217   ShenandoahHeapLocker locker(lock());
2218   heap_region_containing(o)->make_unpinned();
2219 }
2220 
2221 GCTimer* ShenandoahHeap::gc_timer() const {
2222   return _gc_timer;
2223 }
2224 
2225 #ifdef ASSERT
2226 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2227   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2228 
2229   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2230     if (UseDynamicNumberOfGCThreads ||
2231         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2232       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2233     } else {
2234       // Use ParallelGCThreads inside safepoints
2235       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2236     }
2237   } else {
2238     if (UseDynamicNumberOfGCThreads ||
2239         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2240       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2241     } else {
2242       // Use ConcGCThreads outside safepoints
2243       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2244     }
2245   }
2246 }
2247 #endif
2248 
2249 ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() {
2250   return _traversal_gc;
2251 }
2252 
2253 ShenandoahVerifier* ShenandoahHeap::verifier() {
2254   guarantee(ShenandoahVerify, "Should be enabled");
2255   assert (_verifier != NULL, "sanity");
2256   return _verifier;
2257 }
2258 
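// Parallel task behind update_heap_references(): workers claim regions from the shared
// iterator. For collection set regions the mark bitmap below TAMS is cleared; for other
// active regions the closure T updates references in all marked objects. Progress is
// reported to the pacer when pacing is on, and workers bail out if the GC is cancelled.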
2259 template<class T>
2260 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2261 private:
2262   T cl;
2263   ShenandoahHeap* _heap;
2264   ShenandoahRegionIterator* _regions;
2265   bool _concurrent;
2266 public:
2267   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2268     AbstractGangTask("Concurrent Update References Task"),
2269     cl(T()),
2270     _heap(ShenandoahHeap::heap()),
2271     _regions(regions),
2272     _concurrent(concurrent) {
2273   }
2274 
2275   void work(uint worker_id) {
2276     ShenandoahWorkerSession worker_session(worker_id);
2277     SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
2278     ShenandoahHeapRegion* r = _regions->next();
2279     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
2280     while (r != NULL) {
2281       if (_heap->in_collection_set(r)) {
2282         HeapWord* bottom = r->bottom();
2283         HeapWord* top = ctx->top_at_mark_start(r->region_number());
2284         if (top > bottom) {
2285           ctx->clear_bitmap(bottom, top);
2286         }
2287       } else {
2288         if (r->is_active()) {
2289           _heap->marked_object_oop_safe_iterate(r, &cl);
2290         }
2291       }
2292       if (ShenandoahPacing) {
2293         HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
2294         assert (top_at_start_ur >= r->bottom(), "sanity");
2295         _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
2296       }
2297       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2298         return;
2299       }
2300       r = _regions->next();
2301     }
2302   }
2303 };
2304 
2305 void ShenandoahHeap::update_heap_references(bool concurrent) {
2306   ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2307   workers()->run_task(&task);
2308 }
2309 
2310 void ShenandoahHeap::op_init_updaterefs() {
2311   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2312 
2313   set_evacuation_in_progress(false);
2314 
2315   retire_and_reset_gclabs();
2316 
2317   if (ShenandoahVerify) {
2318     verifier()->verify_before_updaterefs();
2319   }
2320 
2321   set_update_refs_in_progress(true);
2322   make_parsable(true);
2323   for (size_t i = 0; i < num_regions(); i++) {
2324     ShenandoahHeapRegion* r = get_region(i);
2325     r->set_concurrent_iteration_safe_limit(r->top());
2326   }
2327 
2328   // Reset iterator.
2329   _update_refs_iterator.reset();
2330 
2331   if (ShenandoahPacing) {
2332     pacer()->setup_for_updaterefs();
2333   }
2334 }
2335 
2336 void ShenandoahHeap::op_final_updaterefs() {
2337   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2338 
2339   // Check if there is left-over work, and finish it
2340   if (_update_refs_iterator.has_next()) {
2341     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2342 
2343     // Finish updating references where we left off.
2344     clear_cancelled_gc();
2345     update_heap_references(false);
2346   }
2347 
2348   // Clear cancelled GC, if set. On cancellation path, the block before would handle
2349   // everything. On degenerated paths, cancelled gc would not be set anyway.
2350   if (cancelled_gc()) {
2351     clear_cancelled_gc();
2352   }
2353   assert(!cancelled_gc(), "Should have been done right before");
2354 
2355   concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2356 
2357   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2358 
2359   trash_cset_regions();
2360   set_has_forwarded_objects(false);
2361   set_update_refs_in_progress(false);
2362 
2363   if (ShenandoahVerify) {
2364     verifier()->verify_after_updaterefs();
2365   }
2366 
2367   {
2368     ShenandoahHeapLocker locker(lock());
2369     _free_set->rebuild();
2370   }
2371 }
2372 
2373 void ShenandoahHeap::set_alloc_seq_gc_start() {
2374   // Take next number, the start seq number is inclusive
2375   _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::seqnum_current_alloc() + 1;
2376 }
2377 
2378 void ShenandoahHeap::set_alloc_seq_gc_end() {
2379   // Take current number, the end seq number is also inclusive
2380   _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::seqnum_current_alloc();
2381 }
2382 
2383 
2384 #ifdef ASSERT
2385 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2386   _lock.assert_owned_by_current_thread();
2387 }
2388 
2389 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2390   _lock.assert_not_owned_by_current_thread();
2391 }
2392 
2393 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2394   _lock.assert_owned_by_current_thread_or_safepoint();
2395 }
2396 #endif
2397 
2398 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2399   print_on(st);
2400   print_heap_regions_on(st);
2401 }
2402 
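// Marking bitmap memory is committed in slices, each backing a group of
// _bitmap_regions_per_slice regions. A slice has to stay committed while any region in
// its group is committed; the helpers below enforce that when (un)committing and idling.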
2403 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2404   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2405 
2406   size_t regions_from = _bitmap_regions_per_slice * slice;
2407   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2408   for (size_t g = regions_from; g < regions_to; g++) {
2409     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2410     if (skip_self && g == r->region_number()) continue;
2411     if (get_region(g)->is_committed()) {
2412       return true;
2413     }
2414   }
2415   return false;
2416 }
2417 
2418 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2419   assert_heaplock_owned_by_current_thread();
2420 
2421   if (is_bitmap_slice_committed(r, true)) {
2422     // Some other region from the group is already committed, meaning the bitmap
2423     // slice is already committed, we exit right away.
2424     return true;
2425   }
2426 
2427   // Commit the bitmap slice:
2428   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2429   size_t off = _bitmap_bytes_per_slice * slice;
2430   size_t len = _bitmap_bytes_per_slice;
2431   if (!os::commit_memory((char*)_bitmap0_region.start() + off, len, false)) {
2432     return false;
2433   }
2434   if (!os::commit_memory((char*)_bitmap1_region.start() + off, len, false)) {
2435     return false;
2436   }
2437   return true;
2438 }
2439 
2440 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2441   assert_heaplock_owned_by_current_thread();
2442 
2443   if (is_bitmap_slice_committed(r, true)) {
2444     // Some other region from the group is still committed, meaning the bitmap
2445     // slice should stay committed; exit right away.
2446     return true;
2447   }
2448 
2449   // Uncommit the bitmap slice:
2450   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2451   size_t off = _bitmap_bytes_per_slice * slice;
2452   size_t len = _bitmap_bytes_per_slice;
2453   if (!os::uncommit_memory((char*)_bitmap0_region.start() + off, len)) {
2454     return false;
2455   }
2456   if (!os::uncommit_memory((char*)_bitmap1_region.start() + off, len)) {
2457     return false;
2458   }
2459   return true;
2460 }
2461 
2462 bool ShenandoahHeap::idle_bitmap_slice(ShenandoahHeapRegion *r) {
2463   assert_heaplock_owned_by_current_thread();
2464   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2465 
2466   if (is_bitmap_slice_committed(r, true)) {
2467     // Some other region from the group is still committed, meaning the bitmap
2468     // slice should stay committed; exit right away.
2469     return true;
2470   }
2471 
2472   // Idle the bitmap slice:
2473   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2474   size_t off = _bitmap_bytes_per_slice * slice;
2475   size_t len = _bitmap_bytes_per_slice;
2476   if (!os::idle_memory((char*)_bitmap0_region.start() + off, len)) {
2477     return false;
2478   }
2479   if (!os::idle_memory((char*)_bitmap1_region.start() + off, len)) {
2480     return false;
2481   }
2482   return true;
2483 }
2484 
2485 void ShenandoahHeap::activate_bitmap_slice(ShenandoahHeapRegion* r) {
2486   assert_heaplock_owned_by_current_thread();
2487   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2488   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2489   size_t off = _bitmap_bytes_per_slice * slice;
2490   size_t len = _bitmap_bytes_per_slice;
2491   os::activate_memory((char*)_bitmap0_region.start() + off, len);
2492   os::activate_memory((char*)_bitmap1_region.start() + off, len);
2493 }
2494 
2495 void ShenandoahHeap::safepoint_synchronize_begin() {
2496   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2497     SuspendibleThreadSet::synchronize();
2498   }
2499 }
2500 
2501 void ShenandoahHeap::safepoint_synchronize_end() {
2502   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2503     SuspendibleThreadSet::desynchronize();
2504   }
2505 }
2506 
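// GC pauses are layered: the vmop_entry_* methods below schedule a VM operation, which
// calls back into the matching entry_* method at a safepoint; entry_* sets up workers,
// timing and event reporting, and then runs the op_* method that does the actual work.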
2507 void ShenandoahHeap::vmop_entry_init_mark() {
2508   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2509   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2510   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2511 
2512   try_inject_alloc_failure();
2513   VM_ShenandoahInitMark op;
2514   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2515 }
2516 
2517 void ShenandoahHeap::vmop_entry_final_mark() {
2518   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2519   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2520   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2521 
2522   try_inject_alloc_failure();
2523   VM_ShenandoahFinalMarkStartEvac op;
2524   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2525 }
2526 
2527 void ShenandoahHeap::vmop_entry_final_evac() {
2528   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2529   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2530   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2531 
2532   VM_ShenandoahFinalEvac op;
2533   VMThread::execute(&op); // jump to entry_final_evac under safepoint
2534 }
2535 
2536 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2537   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2538   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2539   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2540 
2541   try_inject_alloc_failure();
2542   VM_ShenandoahInitUpdateRefs op;
2543   VMThread::execute(&op);
2544 }
2545 
2546 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2547   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2548   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2549   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2550 
2551   try_inject_alloc_failure();
2552   VM_ShenandoahFinalUpdateRefs op;
2553   VMThread::execute(&op);
2554 }
2555 
2556 void ShenandoahHeap::vmop_entry_init_traversal() {
2557   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2558   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2559   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2560 
2561   try_inject_alloc_failure();
2562   VM_ShenandoahInitTraversalGC op;
2563   VMThread::execute(&op);
2564 }
2565 
2566 void ShenandoahHeap::vmop_entry_final_traversal() {
2567   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2568   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2569   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2570 
2571   try_inject_alloc_failure();
2572   VM_ShenandoahFinalTraversalGC op;
2573   VMThread::execute(&op);
2574 }
2575 
2576 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2577   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2578   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2579   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2580 
2581   try_inject_alloc_failure();
2582   VM_ShenandoahFullGC op(cause);
2583   VMThread::execute(&op);
2584 }
2585 
2586 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2587   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2588   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2589   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2590 
2591   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2592   VMThread::execute(&degenerated_gc);
2593 }
2594 
2595 void ShenandoahHeap::entry_init_mark() {
2596   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2597   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2598   const char* msg = init_mark_event_message();
2599   GCTraceTime(Info, gc) time(msg, gc_timer());
2600   EventMark em("%s", msg);
2601 
2602   ShenandoahWorkerScope scope(workers(),
2603                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2604                               "init marking");
2605 
2606   op_init_mark();
2607 }
2608 
2609 void ShenandoahHeap::entry_final_mark() {
2610   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2611   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2612   const char* msg = final_mark_event_message();
2613   GCTraceTime(Info, gc) time(msg, gc_timer());
2614   EventMark em("%s", msg);
2615 
2616   ShenandoahWorkerScope scope(workers(),
2617                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2618                               "final marking");
2619 
2620   op_final_mark();
2621 }
2622 
2623 void ShenandoahHeap::entry_final_evac() {
2624   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2625   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2626   static const char* msg = "Pause Final Evac";
2627   GCTraceTime(Info, gc) time(msg, gc_timer());
2628   EventMark em("%s", msg);
2629 
2630   op_final_evac();
2631 }
2632 
2633 void ShenandoahHeap::entry_init_updaterefs() {
2634   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2635   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2636 
2637   static const char* msg = "Pause Init Update Refs";
2638   GCTraceTime(Info, gc) time(msg, gc_timer());
2639   EventMark em("%s", msg);
2640 
2641   // No workers used in this phase, no setup required
2642 
2643   op_init_updaterefs();
2644 }
2645 
2646 void ShenandoahHeap::entry_final_updaterefs() {
2647   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2648   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2649 
2650   static const char* msg = "Pause Final Update Refs";
2651   GCTraceTime(Info, gc) time(msg, gc_timer());
2652   EventMark em("%s", msg);
2653 
2654   ShenandoahWorkerScope scope(workers(),
2655                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2656                               "final reference update");
2657 
2658   op_final_updaterefs();
2659 }
2660 
2661 void ShenandoahHeap::entry_init_traversal() {
2662   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2663   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2664 
2665   static const char* msg = "Pause Init Traversal";
2666   GCTraceTime(Info, gc) time(msg, gc_timer());
2667   EventMark em("%s", msg);
2668 
2669   ShenandoahWorkerScope scope(workers(),
2670                               ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
2671                               "init traversal");
2672 
2673   op_init_traversal();
2674 }
2675 
2676 void ShenandoahHeap::entry_final_traversal() {
2677   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2678   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2679 
2680   static const char* msg = "Pause Final Traversal";
2681   GCTraceTime(Info, gc) time(msg, gc_timer());
2682   EventMark em("%s", msg);
2683 
2684   ShenandoahWorkerScope scope(workers(),
2685                               ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
2686                               "final traversal");
2687 
2688   op_final_traversal();
2689 }
2690 
2691 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2692   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2693   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2694 
2695   static const char* msg = "Pause Full";
2696   GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2697   EventMark em("%s", msg);
2698 
2699   ShenandoahWorkerScope scope(workers(),
2700                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2701                               "full gc");
2702 
2703   op_full(cause);
2704 }
2705 
2706 void ShenandoahHeap::entry_degenerated(int point) {
2707   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2708   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2709 
2710   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2711   const char* msg = degen_event_message(dpoint);
2712   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2713   EventMark em("%s", msg);
2714 
2715   ShenandoahWorkerScope scope(workers(),
2716                               ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
2717                               "stw degenerated gc");
2718 
2719   set_degenerated_gc_in_progress(true);
2720   op_degenerated(dpoint);
2721   set_degenerated_gc_in_progress(false);
2722 }
2723 
2724 void ShenandoahHeap::entry_mark() {
2725   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2726 
2727   const char* msg = conc_mark_event_message();
2728   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2729   EventMark em("%s", msg);
2730 
2731   ShenandoahWorkerScope scope(workers(),
2732                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2733                               "concurrent marking");
2734 
2735   try_inject_alloc_failure();
2736   op_mark();
2737 }
2738 
2739 void ShenandoahHeap::entry_evac() {
2740   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2741   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2742 
2743   static const char* msg = "Concurrent evacuation";
2744   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2745   EventMark em("%s", msg);
2746 
2747   ShenandoahWorkerScope scope(workers(),
2748                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
2749                               "concurrent evacuation");
2750 
2751   try_inject_alloc_failure();
2752   op_evac();
2753 }
2754 
2755 void ShenandoahHeap::entry_updaterefs() {
2756   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2757 
2758   static const char* msg = "Concurrent update references";
2759   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2760   EventMark em("%s", msg);
2761 
2762   ShenandoahWorkerScope scope(workers(),
2763                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
2764                               "concurrent reference update");
2765 
2766   try_inject_alloc_failure();
2767   op_updaterefs();
2768 }

2769 void ShenandoahHeap::entry_cleanup() {
2770   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2771 
2772   static const char* msg = "Concurrent cleanup";
2773   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2774   EventMark em("%s", msg);
2775 
2776   // This phase does not use workers, no need for setup
2777 
2778   try_inject_alloc_failure();
2779   op_cleanup();
2780 }
2781 
2782 void ShenandoahHeap::entry_cleanup_traversal() {
2783   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2784 
2785   static const char* msg = "Concurrent cleanup";
2786   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2787   EventMark em("%s", msg);
2788 
2789   ShenandoahWorkerScope scope(workers(),
2790                               ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
2791                               "concurrent traversal cleanup");
2792 
2793   try_inject_alloc_failure();
2794   op_cleanup_traversal();
2795 }
2796 
2797 void ShenandoahHeap::entry_cleanup_bitmaps() {
2798   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2799 
2800   static const char* msg = "Concurrent cleanup";
2801   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2802   EventMark em("%s", msg);
2803 
2804   ShenandoahWorkerScope scope(workers(),
2805                               ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
2806                               "concurrent cleanup");
2807 
2808   try_inject_alloc_failure();
2809   op_cleanup_bitmaps();
2810 }
2811 
2812 void ShenandoahHeap::entry_preclean() {
2813   if (ShenandoahPreclean && process_references()) {
2814     static const char* msg = "Concurrent precleaning";
2815     GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2816     EventMark em("%s", msg);
2817 
2818     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2819 
2820     ShenandoahWorkerScope scope(workers(),
2821                                 ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
2822                                 "concurrent preclean");
2823 
2824     try_inject_alloc_failure();
2825     op_preclean();
2826   }
2827 }
2828 
2829 void ShenandoahHeap::entry_traversal() {
2830   static const char* msg = "Concurrent traversal";
2831   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2832   EventMark em("%s", msg);
2833 
2834   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2835 
2836   ShenandoahWorkerScope scope(workers(),
2837                               ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
2838                               "concurrent traversal");
2839 
2840   try_inject_alloc_failure();
2841   op_traversal();
2842 }
2843 
2844 void ShenandoahHeap::entry_uncommit(double shrink_before) {
2845   static const char *msg = "Concurrent uncommit";
2846   GCTraceTime(Info, gc) time(msg, NULL, GCCause::_no_gc, true);
2847   EventMark em("%s", msg);
2848 
2849   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);
2850 
2851   op_uncommit(shrink_before);
2852 }
2853 
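// Diagnostic support: with ShenandoahAllocFailureALot, occasionally raise the
// _inject_alloc_failure flag; the allocation path polls it via
// should_inject_alloc_failure() and treats the allocation as failed, exercising the
// cancellation and degenerated GC paths.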
2854 void ShenandoahHeap::try_inject_alloc_failure() {
2855   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2856     _inject_alloc_failure.set();
2857     os::naked_short_sleep(1);
2858     if (cancelled_gc()) {
2859       log_info(gc)("Allocation failure was successfully injected");
2860     }
2861   }
2862 }
2863 
2864 bool ShenandoahHeap::should_inject_alloc_failure() {
2865   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2866 }
2867 
2868 void ShenandoahHeap::initialize_serviceability() {
2869   _memory_pool = new ShenandoahMemoryPool(this);
2870   _cycle_memory_manager.add_pool(_memory_pool);
2871   _stw_memory_manager.add_pool(_memory_pool);
2872 }
2873 
2874 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2875   GrowableArray<GCMemoryManager*> memory_managers(2);
2876   memory_managers.append(&_cycle_memory_manager);
2877   memory_managers.append(&_stw_memory_manager);
2878   return memory_managers;
2879 }
2880 
2881 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2882   GrowableArray<MemoryPool*> memory_pools(1);
2883   memory_pools.append(_memory_pool);
2884   return memory_pools;
2885 }
2886 
2887 void ShenandoahHeap::enter_evacuation() {
2888   _oom_evac_handler.enter_evacuation();
2889 }
2890 
2891 void ShenandoahHeap::leave_evacuation() {
2892   _oom_evac_handler.leave_evacuation();
2893 }
2894 
2895 SoftRefPolicy* ShenandoahHeap::soft_ref_policy() {
2896   return &_soft_ref_policy;
2897 }
2898 
2899 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2900   _index(0),
2901   _heap(ShenandoahHeap::heap()) {}
2902 
2903 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2904   _index(0),
2905   _heap(heap) {}
2906 
2907 void ShenandoahRegionIterator::reset() {
2908   _index = 0;
2909 }
2910 
2911 bool ShenandoahRegionIterator::has_next() const {
2912   return _index < _heap->num_regions();
2913 }
2914 
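// Convenience overload: visit all regions in address order without any skipping,
// stopping as soon as the closure returns true.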
2915 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure& cl) const {
2916   ShenandoahRegionIterator regions;
2917   ShenandoahHeapRegion* r = regions.next();
2918   while (r != NULL) {
2919     if (cl.heap_region_do(r)) {
2920       break;
2921     }
2922     r = regions.next();
2923   }
2924 }
2925 
char ShenandoahHeap::gc_state() const {
  return _gc_state.raw_value();
}

void ShenandoahHeap::deduplicate_string(oop str) {
  assert(java_lang_String::is_instance(str), "invariant");

  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::deduplicate(str);
  }
}

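// The pause / concurrent-phase messages below are returned as string literals
// (callers such as GCTraceTime and EventMark keep the returned pointer), so every
// combination of the (update refs), (process refs) and (unload classes) options
// is spelled out explicitly.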
const char* ShenandoahHeap::init_mark_event_message() const {
  bool update_refs = has_forwarded_objects();
  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (update_refs && proc_refs && unload_cls) {
    return "Pause Init Mark (update refs) (process refs) (unload classes)";
  } else if (update_refs && proc_refs) {
    return "Pause Init Mark (update refs) (process refs)";
  } else if (update_refs && unload_cls) {
    return "Pause Init Mark (update refs) (unload classes)";
  } else if (proc_refs && unload_cls) {
    return "Pause Init Mark (process refs) (unload classes)";
  } else if (update_refs) {
    return "Pause Init Mark (update refs)";
  } else if (proc_refs) {
    return "Pause Init Mark (process refs)";
  } else if (unload_cls) {
    return "Pause Init Mark (unload classes)";
  } else {
    return "Pause Init Mark";
  }
}

const char* ShenandoahHeap::final_mark_event_message() const {
  bool update_refs = has_forwarded_objects();
  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (update_refs && proc_refs && unload_cls) {
    return "Pause Final Mark (update refs) (process refs) (unload classes)";
  } else if (update_refs && proc_refs) {
    return "Pause Final Mark (update refs) (process refs)";
  } else if (update_refs && unload_cls) {
    return "Pause Final Mark (update refs) (unload classes)";
  } else if (proc_refs && unload_cls) {
    return "Pause Final Mark (process refs) (unload classes)";
  } else if (update_refs) {
    return "Pause Final Mark (update refs)";
  } else if (proc_refs) {
    return "Pause Final Mark (process refs)";
  } else if (unload_cls) {
    return "Pause Final Mark (unload classes)";
  } else {
    return "Pause Final Mark";
  }
}

const char* ShenandoahHeap::conc_mark_event_message() const {
  bool update_refs = has_forwarded_objects();
  bool proc_refs = process_references();
  bool unload_cls = unload_classes();

  if (update_refs && proc_refs && unload_cls) {
    return "Concurrent marking (update refs) (process refs) (unload classes)";
  } else if (update_refs && proc_refs) {
    return "Concurrent marking (update refs) (process refs)";
  } else if (update_refs && unload_cls) {
    return "Concurrent marking (update refs) (unload classes)";
  } else if (proc_refs && unload_cls) {
    return "Concurrent marking (process refs) (unload classes)";
  } else if (update_refs) {
    return "Concurrent marking (update refs)";
  } else if (proc_refs) {
    return "Concurrent marking (process refs)";
  } else if (unload_cls) {
    return "Concurrent marking (unload classes)";
  } else {
    return "Concurrent marking";
  }
}

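// Pause message for a Degenerated GC, keyed by the point at which the
// concurrent cycle was abandoned.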
const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
  switch (point) {
    case _degenerated_unset:
      return "Pause Degenerated GC (<UNSET>)";
    case _degenerated_traversal:
      return "Pause Degenerated GC (Traversal)";
    case _degenerated_outside_cycle:
      return "Pause Degenerated GC (Outside of Cycle)";
    case _degenerated_mark:
      return "Pause Degenerated GC (Mark)";
    case _degenerated_evac:
      return "Pause Degenerated GC (Evacuation)";
    case _degenerated_updaterefs:
      return "Pause Degenerated GC (Update Refs)";
    default:
      ShouldNotReachHere();
      return "ERROR";
  }
}

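// Picks the liveness closure: when forwarded objects may exist (during or after
// evacuation), use the forwarding-aware closure, otherwise the plain one.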
BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
  // Both closures are BoolObjectClosure subclasses, so a plain upcast is
  // sufficient; reinterpret_cast is not needed here.
  return ShenandoahHeap::heap()->has_forwarded_objects() ? static_cast<BoolObjectClosure*>(&_fwd_alive_cl)
                                                         : static_cast<BoolObjectClosure*>(&_alive_cl);
}