/*
 * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/plab.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahAllocTracker.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    shenandoah_assert_not_forwarded(p, obj);
  }
}

void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

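// Pre-touch heap and bitmap storage with multiple worker threads. On NUMA systems this
// matters: pages get bound to the nodes of the threads that first touch them, so a given
// region and its bitmap slices are touched from the same worker (see note in initialize()).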
class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

      size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert(end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  BrooksPointer::initial_checks();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  if (ShenandoahAlwaysPreTouch) {
    // Enabled pre-touch means the entire heap is committed right away.
    init_byte_size = max_byte_size;
  }

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 heap_alignment);
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);

  _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
  _committed = _initial_size;

  log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
  if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
    vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
  }

  size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());

  _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());

  _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
                                                         (HeapWord*) pgc_rs.base() + reg_size_words * i,
                                                         reg_size_words,
                                                         i,
                                                         i < num_committed_regions);

      _complete_top_at_mark_starts_base[i] = r->bottom();
      _next_top_at_mark_starts_base[i] = r->bottom();
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    _free_set->rebuild();
  }

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));

  // The call below uses SATB facilities that currently live in G1, but probably
  // belong in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for the complete and next mark bitmaps.
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

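  // Bitmaps are committed and uncommitted in page-granular "slices". Pick the slice so
  // that it is at least one page and covers a whole number of regions: with small pages,
  // one region's bitmap forms its own slice; with large pages, several regions share one.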
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee((_bitmap_bytes_per_slice % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
                            "couldn't allocate initial bitmap");
  os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
                            "couldn't allocate initial bitmap");

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert(!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before the initialize() calls below zero it from the initializing thread. For any given
    // region, we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _num_regions, page_size);
    ShenandoahPretouchTask cl(bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
  _next_mark_bit_map = &_mark_bit_map1;

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  if (UseShenandoahMatrix) {
    _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
  } else {
    _connection_matrix = NULL;
  }

  _traversal_gc = _shenandoah_policy->can_do_traversal_gc() ?
                new ShenandoahTraversalGC(this, _num_regions) :
                NULL;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _phase_timings = new ShenandoahPhaseTimings();

  if (ShenandoahAllocationTrace) {
    _alloc_tracker = new ShenandoahAllocTracker();
  }

  ShenandoahStringDedup::initialize();

  _control_thread = new ShenandoahControlThread();

  ShenandoahCodeRoots::initialize();

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_trace(gc, region)("All Regions");
    print_heap_regions_on(&ls);
    log_trace(gc, region)("Free Regions");
    _free_set->print_on(&ls);
  }

  log_info(gc, init)("Safepointing mechanism: %s",
                     SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
                     (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));

  return JNI_OK;
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _regions(NULL),
  _free_set(NULL),
  _collection_set(NULL),
  _update_refs_iterator(this),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _aux_bit_map(),
  _connection_matrix(NULL),
  _verifier(NULL),
  _pacer(NULL),
  _used_at_last_gc(0),
  _alloc_seq_at_last_gc_start(0),
  _alloc_seq_at_last_gc_end(0),
  _safepoint_workers(NULL),
  _gc_cycle_mode(),
#ifdef ASSERT
  _heap_expansion_count(0),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL),
  _alloc_tracker(NULL),
  _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
  _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
  _mutator_gclab_stats(new PLABStats("Shenandoah mutator GCLAB stats", OldPLABSize, PLABWeight)),
  _collector_gclab_stats(new PLABStats("Shenandoah collector GCLAB stats", YoungPLABSize, PLABWeight)),
  _memory_pool(NULL)
{
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _full_gc = new ShenandoahMarkCompact();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelSafepointCleanupThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelSafepointCleanupThreads,
                                                false, false);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

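// Clears the next mark bitmap for all committed regions, in parallel. Only the range up
// to the region's TAMS can contain marks, so only that range needs clearing.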
class ShenandoahResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetNextBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        HeapWord* bottom = region->bottom();
        HeapWord* top = heap->next_top_at_mark_start(region->bottom());
        if (top > bottom) {
          heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
        assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());

  ShenandoahResetNextBitmapTask task;
  _workers->run_task(&task);
}

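// Traversal GC variant of the bitmap reset: for regions in the traversal set, the next
// bitmap still holds current marks, so they are first copied into the complete bitmap
// (moving TAMS along) before the next bitmap is cleared.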
class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetNextBitmapTraversalTask() :
    AbstractGangTask("Parallel Reset Bitmap Task for Traversal") {}

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegionSet* traversal_set = heap->traversal_gc()->traversal_set();
    ShenandoahHeapRegion* region = _regions.next();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        if (traversal_set->is_in(region) && !region->is_trash()) {
          ShenandoahHeapLocker locker(heap->lock());
          HeapWord* bottom = region->bottom();
          HeapWord* top = heap->next_top_at_mark_start(bottom);
          assert(top <= region->top(),
                 "TAMS must be smaller than or equal to top: TAMS: " PTR_FORMAT ", top: " PTR_FORMAT,
                 p2i(top), p2i(region->top()));
          if (top > bottom) {
            heap->complete_mark_bit_map()->copy_from(heap->next_mark_bit_map(), MemRegion(bottom, top));
            heap->set_complete_top_at_mark_start(bottom, top);
            heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
            heap->set_next_top_at_mark_start(bottom, bottom);
          }
        }
        assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()),
               "next bitmap must be clear");
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap_traversal() {
  assert_gc_workers(_workers->active_workers());

  ShenandoahResetNextBitmapTraversalTask task;
  _workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  for (size_t idx = 0; idx < _num_regions; idx++) {
    ShenandoahHeapRegion* r = get_region(idx);
    if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
      return false;
    }
  }
  return true;
}

bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
               capacity() / K, committed() / K, used() / K);
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "K regions",
               num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);

  st->print("Status: ");
  if (has_forwarded_objects())               st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())      st->print("marking, ");
  if (is_evacuation_in_progress())           st->print("evacuating, ");
  if (is_update_refs_in_progress())          st->print("updating refs, ");
  if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
  if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
  if (is_full_gc_in_progress())              st->print("full gc, ");
  if (is_full_gc_move_in_progress())         st->print("full gc move, ");

  if (cancelled_concgc()) {
    st->print("conc gc cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  if (UseShenandoahMatrix) {
    st->print_cr("Matrix:");

    ShenandoahConnectionMatrix* matrix = connection_matrix();
    if (matrix != NULL) {
      st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
      st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
      st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
    } else {
      st->print_cr(" No matrix.");
    }
  }

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    ShenandoahHeap::heap()->initialize_gclab(thread);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitGCLABClosure init_gclabs;
  Threads::java_threads_do(&init_gclabs);
  gc_threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because their max_size
  // cannot be determined yet. Instead, let the WorkGang initialize the GCLAB
  // whenever a new worker is created.
  _workers->set_initialize_gclab();

  _scm->initialize(_max_workers);
  _full_gc->initialize(_gc_timer);

  ref_processing_init();

  _shenandoah_policy->post_heap_initialize();
}

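// Heap usage counters. _used is updated concurrently by mutator and GC threads, hence
// the atomic updates and acquire/release ordering in the accessors below.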
size_t ShenandoahHeap::used() const {
  return OrderAccess::load_acquire(&_used);
}

size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  OrderAccess::release_store_fence(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(bytes, &_used);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(bytes, &_bytes_allocated_since_gc_start);
}

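// Account for an allocation of the given size. Wasted allocations (e.g. retired LAB
// remainders) count towards the allocation rate seen by the pacer and heuristics, but
// not towards used(), since no live object occupies that space.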
void ShenandoahHeap::notify_alloc(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return num_regions() * ShenandoahHeapRegion::region_size_bytes();
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(oop p) {
  return true;
}

void ShenandoahHeap::handle_heap_shrinkage(double shrink_before) {
  if (!ShenandoahUncommit) {
    return;
  }

  ShenandoahHeapLocker locker(lock());

  size_t count = 0;
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      r->make_uncommitted();
      count++;
    }
  }

  if (count > 0) {
    log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
                 count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
    _control_thread->notify_heap_changed();
  }
}

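// GCLAB allocation slow path: the current GCLAB could not satisfy the request, so retire
// it, install a fresh one sized from the per-thread-kind PLAB statistics, and retry the
// allocation from the new buffer.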
HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  PLAB* gclab = thread->gclab();

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  gclab->retire();
  // Figure out size of new GCLAB
  size_t new_gclab_size;
  if (thread->is_Java_thread()) {
    new_gclab_size = _mutator_gclab_stats->desired_plab_sz(Threads::number_of_threads());
  } else {
    new_gclab_size = _collector_gclab_stats->desired_plab_sz(workers()->active_workers());
  }

  // Allocate a new GCLAB...
  HeapWord* gclab_buf = allocate_new_gclab(new_gclab_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(gclab_buf, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, new_gclab_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_tlab);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_gclab);
}

HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
  HeapWord* result = allocate_memory(word_size, type);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in collection set");

    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, AllocType type) {
  ShenandoahAllocTrace trace_alloc(word_size, type);

  bool in_new_region = false;
  HeapWord* result = NULL;

  if (type == _alloc_tlab || type == _alloc_shared) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(word_size);
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(word_size, type, in_new_region);
    }

    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking the way to detect progress from "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahAllocGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      control_thread()->handle_alloc_failure(word_size);
      result = allocate_memory_under_lock(word_size, type, in_new_region);
    }
  } else {
    assert(type == _alloc_gclab || type == _alloc_shared_gc, "Can only accept these types here");
    result = allocate_memory_under_lock(word_size, type, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  if (result != NULL) {
    notify_alloc(word_size, false);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, AllocType type, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(word_size, type, in_new_region);
}

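// Shared (non-LAB) allocation entry point used by the runtime. Every Shenandoah object is
// preceded by a Brooks forwarding pointer word, so the allocation is padded by
// BrooksPointer::word_size() and the returned address points past the fwdptr slot.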
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool*  gc_overhead_limit_was_exceeded) {
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _alloc_shared);
  HeapWord* result = filler + BrooksPointer::word_size();
  if (filler != NULL) {
    BrooksPointer::initialize(oop(result));

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    return NULL;
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        shenandoah_assert_marked_complete(p, obj);
        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          _heap->evacuate_object(obj, _thread);
        }
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked_complete(NULL, p);
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

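// Parallel evacuation: workers claim collection set regions one at a time and evacuate
// all live objects in the claimed region. Optionally, one worker also claims the code
// cache and evacuates oops referenced from nmethods (without updating them here).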
class ShenandoahParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  ShenandoahSharedFlag _claimed_codecache;

public:
  ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
                         ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs)
  {}

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);

    // If concurrent code cache evac is enabled, evacuate it here.
    // Note we cannot update the roots here, because we risk non-atomic stores to the alive
    // nmethods. The update would be handled elsewhere.
    if (ShenandoahConcurrentEvacCodeRoots && _claimed_codecache.try_set()) {
      ShenandoahEvacuateRootsClosure cl;
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
      CodeCache::blobs_do(&blobs);
    }

    ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      log_develop_trace(gc, region)("Thread " INT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    r->region_number());

      assert(r->has_live(), "all-garbage regions are reclaimed early");
      _sh->marked_object_iterate(r, &cl);

      if (_sh->check_cancelled_concgc_and_yield()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, r->region_number());
        break;
      }

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->get_live_data_words());
      }
    }
  }
};

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
  st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

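// A humongous object occupies a "humongous start" region followed by zero or more
// "humongous continuation" regions. Compute the region count from the object size and
// trash the regions tail-first, so region printing invariants hold during teardown.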
void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->region_number() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");
  log_trace(gc, humongous)("Reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
    // as it expects that every region belongs to a humongous region starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    LogTarget(Trace, gc, humongous) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      region->print_on(&ls);
    }

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!in_collection_set(region), "Humongous region should not be in collection set");

    region->make_trash();
  }
}

#ifdef ASSERT
class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool heap_region_do(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    // Allocations might have happened before we STWed here, record peak:
    shenandoahPolicy()->record_peak_occupancy();

    make_tlabs_parsable(true);

    if (ShenandoahVerify) {
      verifier()->verify_after_concmark();
    }

    trash_cset_regions();

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLocker locker(lock());
      _collection_set->clear();
      _free_set->clear();

#ifdef ASSERT
      ShenandoahCheckCollectionSetClosure ccsc;
      heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _free_set->rebuild();
    }

    Universe::update_heap_info_at_gc();

    if (ShenandoahVerify) {
      verifier()->verify_before_evacuation();
    }
  }
}

class ShenandoahRetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}

  void do_thread(Thread* thread) {
    PLAB* gclab = thread->gclab();
    if (gclab != NULL) {
      gclab->retire();
    }
  }
};

void ShenandoahHeap::make_tlabs_parsable(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
  }
  ShenandoahRetireTLABClosure cl(retire_tlabs);
  Threads::java_threads_do(&cl);
  gc_threads_do(&cl);
}

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahEvacuateUpdateRootsClosure cl;

    if (ShenandoahConcurrentEvacCodeRoots) {
      _rp->process_evacuate_roots(&cl, NULL, worker_id);
    } else {
      MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
      _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
    }
  }
};

class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

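// Evacuate and update all GC roots at the init-evac pause. If the cycle got cancelled
// while workers were evacuating, roots may still point at from-space copies and must be
// fixed up before the cycle continues.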
void ShenandoahHeap::evacuate_and_update_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
  if (cancelled_concgc()) {
    fixup_roots();
  }
}

void ShenandoahHeap::fixup_roots() {
  assert(cancelled_concgc(), "Only after concurrent cycle failed");

  // If initial evacuation has been cancelled, we need to update all references
  // after all workers have finished. Otherwise we might run into the following problem:
  // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
  // pointer to object X in some root oop*. GC thread 2 then evacuates the same object X
  // to to-space, which leaves that root with a truly dangling from-space reference.
  // This must not happen.
  // clear() and update_pointers() must always be called in pairs,
  // cannot nest with above clear()/update_pointers().
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
  ShenandoahFixRootsTask update_roots_task(&rp);
  workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
}
1195 
1196 bool ShenandoahHeap::supports_tlab_allocation() const {
1197   return true;
1198 }
1199 
1200 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1201   return MIN2(_free_set->unsafe_peek_free(), max_tlab_size());
1202 }
1203 
1204 size_t ShenandoahHeap::max_tlab_size() const {
1205   return ShenandoahHeapRegion::max_tlab_size_bytes();
1206 }
1207 
1208 class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
1209 public:
1210   void do_thread(Thread* thread) {
1211     ShenandoahHeap* heap = ShenandoahHeap::heap();
1212     PLAB* gclab = thread->gclab();
1213     if (gclab != NULL) {
1214       if (thread->is_Java_thread()) {
1215         gclab->flush_and_retire_stats(heap->mutator_gclab_stats());
1216       } else {
1217         gclab->flush_and_retire_stats(heap->collector_gclab_stats());
1218       }
1219     }
1220   }
1221 };
1222 
1223 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1224   ShenandoahAccumulateStatisticsGCLABClosure cl;
1225   Threads::java_threads_do(&cl);
1226   gc_threads_do(&cl);
1227   _mutator_gclab_stats->adjust_desired_plab_sz();
1228   _collector_gclab_stats->adjust_desired_plab_sz();
1229 }
1230 
1231 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1232   return true;
1233 }
1234 
1235 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1236   // Overridden to do nothing.
1237   return new_obj;
1238 }
1239 
1240 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1241   return true;
1242 }
1243 
1244 bool ShenandoahHeap::card_mark_must_follow_store() const {
1245   return false;
1246 }
1247 
1248 void ShenandoahHeap::collect(GCCause::Cause cause) {
1249   _control_thread->handle_explicit_gc(cause);
1250 }
1251 
1252 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1253   //assert(false, "Shouldn't need to do full collections");
1254 }
1255 
1256 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1257   Unimplemented();
1258   return NULL;
1259 
1260 }
1261 
1262 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1263   return _shenandoah_policy;
1264 }
1265 
1266 
1267 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1268   Space* sp = heap_region_containing(addr);
1269   if (sp != NULL) {
1270     return sp->block_start(addr);
1271   }
1272   return NULL;
1273 }
1274 
1275 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1276   Space* sp = heap_region_containing(addr);
1277   assert(sp != NULL, "block_size of address outside of heap");
1278   return sp->block_size(addr);
1279 }
1280 
1281 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1282   Space* sp = heap_region_containing(addr);
1283   return sp->block_is_obj(addr);
1284 }
1285 
1286 jlong ShenandoahHeap::millis_since_last_gc() {
1287   return 0;
1288 }
1289 
1290 void ShenandoahHeap::prepare_for_verify() {
1291   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1292     make_tlabs_parsable(false);
1293   }
1294 }
1295 
1296 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1297   workers()->print_worker_threads_on(st);
1298   if (ShenandoahStringDedup::is_enabled()) {
1299     ShenandoahStringDedup::print_worker_threads_on(st);
1300   }
1301 }
1302 
1303 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1304   workers()->threads_do(tcl);
1305   if (ShenandoahStringDedup::is_enabled()) {
1306     ShenandoahStringDedup::threads_do(tcl);
1307   }
1308 }
1309 
1310 void ShenandoahHeap::print_tracing_info() const {
1311   LogTarget(Info, gc, stats) lt;
1312   if (lt.is_enabled()) {
1313     ResourceMark rm;
1314     LogStream ls(lt);
1315 
1316     phase_timings()->print_on(&ls);
1317 
1318     ls.cr();
1319     ls.cr();
1320 
1321     shenandoahPolicy()->print_gc_stats(&ls);
1322 
1323     ls.cr();
1324     ls.cr();
1325 
1326     if (ShenandoahPacing) {
1327       pacer()->print_on(&ls);
1328     }
1329 
1330     ls.cr();
1331     ls.cr();
1332 
1333     if (ShenandoahAllocationTrace) {
1334       assert(alloc_tracker() != NULL, "Must be");
1335       alloc_tracker()->print_on(&ls);
1336     } else {
1337       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1338     }
1339   }
1340 }
1341 
1342 void ShenandoahHeap::verify(VerifyOption vo) {
1343   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1344     if (ShenandoahVerify) {
1345       verifier()->verify_generic(vo);
1346     } else {
1347       // TODO: Consider allocating verification bitmaps on demand,
1348       // and turn this on unconditionally.
1349     }
1350   }
1351 }
1352 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1353   return _free_set->capacity();
1354 }
1355 
1356 class ObjectIterateScanRootClosure : public ExtendedOopClosure {
1357 private:
1358   MarkBitMap* _bitmap;
1359   Stack<oop,mtGC>* _oop_stack;
1360 
1361   template <class T>
1362   void do_oop_work(T* p) {
1363     T o = oopDesc::load_heap_oop(p);
1364     if (!oopDesc::is_null(o)) {
1365       oop obj = oopDesc::decode_heap_oop_not_null(o);
1366       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1367       assert(oopDesc::is_oop(obj), "must be a valid oop");
1368       if (!_bitmap->isMarked((HeapWord*) obj)) {
1369         _bitmap->mark((HeapWord*) obj);
1370         _oop_stack->push(obj);
1371       }
1372     }
1373   }
1374 public:
1375   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1376     _bitmap(bitmap), _oop_stack(oop_stack) {}
1377   void do_oop(oop* p)       { do_oop_work(p); }
1378   void do_oop(narrowOop* p) { do_oop_work(p); }
1379 };
1380 
1381 /*
1382  * This is public API, used in preparation of object_iterate().
1383  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1384  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1385  * control, we call SH::make_tlabs_parsable().
1386  */
1387 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1388   // No-op.
1389 }
1390 
1391 /*
1392  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1393  *
1394  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1395  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1396  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1397  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1398  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1399  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1400  * wiped the bitmap in preparation for next marking).
1401  *
1402  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1403  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1404  * is allowed to report dead objects, but is not required to do so.
1405  */
1406 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1407   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1408   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1409     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1410     return;
1411   }
1412 
1413   Stack<oop,mtGC> oop_stack;
1414 
1415   // First, we process all GC roots. This populates the work stack with initial objects.
1416   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1417   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1418   CLDToOopClosure clds(&oops, false);
1419   CodeBlobToOopClosure blobs(&oops, false);
1420   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1421 
1422   // Work through the oop stack to traverse heap.
1423   while (! oop_stack.is_empty()) {
1424     oop obj = oop_stack.pop();
1425     assert(oopDesc::is_oop(obj), "must be a valid oop");
1426     cl->do_object(obj);
1427     obj->oop_iterate(&oops);
1428   }
1429 
1430   assert(oop_stack.is_empty(), "should be empty");
1431 
1432   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1433     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1434   }
1435 }
1436 
1437 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1438   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1439   object_iterate(cl);
1440 }
1441 
1442 // Apply blk->heap_region_do() on all committed regions in address order,
1443 // terminating the iteration early if heap_region_do() returns true.
1444 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1445   for (size_t i = 0; i < num_regions(); i++) {
1446     ShenandoahHeapRegion* current  = get_region(i);
1447     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1448       continue;
1449     }
1450     if (skip_cset_regions && in_collection_set(current)) {
1451       continue;
1452     }
1453     if (blk->heap_region_do(current)) {
1454       return;
1455     }
1456   }
1457 }
1458 
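     // Region closure used at init-mark: resets per-region liveness data and sets the
     // next top-at-mark-start to the current top, so that objects allocated during the
     // cycle are treated as implicitly live.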
1459 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1460 private:
1461   ShenandoahHeap* sh;
1462 public:
1463   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1464 
1465   bool heap_region_do(ShenandoahHeapRegion* r) {
1466     r->clear_live_data();
1467     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1468     return false;
1469   }
1470 };
1471 
1472 void ShenandoahHeap::op_init_mark() {
1473   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1474 
1475   assert(is_next_bitmap_clear(), "need clear marking bitmap");
1476 
1477   if (ShenandoahVerify) {
1478     verifier()->verify_before_concmark();
1479   }
1480 
1481   {
1482     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1483     accumulate_statistics_all_tlabs();
1484   }
1485 
1486   set_concurrent_mark_in_progress(true);
1487   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1488   {
1489     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1490     make_tlabs_parsable(true);
1491   }
1492 
1493   {
1494     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1495     ShenandoahClearLivenessClosure clc(this);
1496     heap_region_iterate(&clc);
1497   }
1498 
1499   // Make above changes visible to worker threads
1500   OrderAccess::fence();
1501 
1502   concurrentMark()->init_mark_roots();
1503 
1504   if (UseTLAB) {
1505     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1506     resize_all_tlabs();
1507   }
1508 
1509   if (ShenandoahPacing) {
1510     pacer()->setup_for_mark();
1511   }
1512 }
1513 
1514 void ShenandoahHeap::op_mark() {
1515   concurrentMark()->mark_from_roots();
1516 
1517   // Allocations happen during concurrent mark, record peak after the phase:
1518   shenandoahPolicy()->record_peak_occupancy();
1519 }
1520 
1521 void ShenandoahHeap::op_final_mark() {
1522   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1523 
1524   // It is critical that we evacuate roots right after finishing marking,
1525   // so that we don't get unmarked objects in the roots.
1527 
1528   if (! cancelled_concgc()) {
1529     concurrentMark()->finish_mark_from_roots();
1530     stop_concurrent_marking();
1531 
1532     {
1533       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1534       prepare_for_concurrent_evacuation();
1535     }
1536 
1537     // If collection set has candidates, start evacuation.
1538     // Otherwise, bypass the rest of the cycle.
1539     if (!collection_set()->is_empty()) {
1540       set_evacuation_in_progress(true);
1541       // From here on, we need to update references.
1542       set_has_forwarded_objects(true);
1543 
1544       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1545       evacuate_and_update_roots();
1546     }
1547 
1548     if (ShenandoahPacing) {
1549       pacer()->setup_for_evac();
1550     }
1551   } else {
1552     concurrentMark()->cancel();
1553     stop_concurrent_marking();
1554 
1555     if (process_references()) {
1556       // Abandon reference processing right away: pre-cleaning must have failed.
1557       ReferenceProcessor *rp = ref_processor();
1558       rp->disable_discovery();
1559       rp->abandon_partial_discovery();
1560       rp->verify_no_references_recorded();
1561     }
1562   }
1563 }
1564 
1565 void ShenandoahHeap::op_final_evac() {
1566   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1567 
1568   accumulate_statistics_all_gclabs();
1569   set_evacuation_in_progress(false);
1570   if (ShenandoahVerify) {
1571     verifier()->verify_after_evacuation();
1572   }
1573 }
1574 
1575 void ShenandoahHeap::op_evac() {
1577   LogTarget(Trace, gc, region) lt_region;
1578   LogTarget(Trace, gc, cset) lt_cset;
1579 
1580   if (lt_region.is_enabled()) {
1581     ResourceMark rm;
1582     LogStream ls(lt_region);
1583     ls.print_cr("All available regions:");
1584     print_heap_regions_on(&ls);
1585   }
1586 
1587   if (lt_cset.is_enabled()) {
1588     ResourceMark rm;
1589     LogStream ls(lt_cset);
1590     ls.print_cr("Collection set (" SIZE_FORMAT " regions):", _collection_set->count());
1591     _collection_set->print_on(&ls);
1592 
1593     ls.print_cr("Free set:");
1594     _free_set->print_on(&ls);
1595   }
1596 
1597   ShenandoahParallelEvacuationTask task(this, _collection_set);
1598   workers()->run_task(&task);
1599 
1600   if (lt_cset.is_enabled()) {
1601     ResourceMark rm;
1602     LogStream ls(lt_cset);
1603     ls.print_cr("After evacuation collection set (" SIZE_FORMAT " regions):",
1604                 _collection_set->count());
1605     _collection_set->print_on(&ls);
1606 
1607     ls.print_cr("After evacuation free set:");
1608     _free_set->print_on(&ls);
1609   }
1610 
1611   if (lt_region.is_enabled()) {
1612     ResourceMark rm;
1613     LogStream ls(lt_region);
1614     ls.print_cr("All regions after evacuation:");
1615     print_heap_regions_on(&ls);
1616   }
1617 
1618   // Allocations happen during evacuation, record peak after the phase:
1619   shenandoahPolicy()->record_peak_occupancy();
1620 }
1621 
1622 void ShenandoahHeap::op_updaterefs() {
1623   update_heap_references(true);
1624 
1625   // Allocations happen during update-refs, record peak after the phase:
1626   shenandoahPolicy()->record_peak_occupancy();
1627 }
1628 
1629 void ShenandoahHeap::op_cleanup() {
1630   ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
1631   free_set()->recycle_trash();
1632 
1633   // Allocations happen during cleanup, record peak after the phase:
1634   shenandoahPolicy()->record_peak_occupancy();
1635 }
1636 
1637 void ShenandoahHeap::op_cleanup_bitmaps() {
1638   op_cleanup();
1639 
1640   ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1641   reset_next_mark_bitmap();
1642 
1643   // Allocations happen during bitmap cleanup, record peak after the phase:
1644   shenandoahPolicy()->record_peak_occupancy();
1645 }
1646 
1647 void ShenandoahHeap::op_cleanup_traversal() {
1649   {
1650     ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1651     reset_next_mark_bitmap_traversal();
1652   }
1653 
1654   op_cleanup();
1655 
1656   // Allocations happen during bitmap cleanup, record peak after the phase:
1657   shenandoahPolicy()->record_peak_occupancy();
1658 }
1659 
1660 void ShenandoahHeap::op_preclean() {
1661   concurrentMark()->preclean_weak_refs();
1662 
1663   // Allocations happen during concurrent preclean, record peak after the phase:
1664   shenandoahPolicy()->record_peak_occupancy();
1665 }
1666 
1667 void ShenandoahHeap::op_init_traversal() {
1668   traversal_gc()->init_traversal_collection();
1669 }
1670 
1671 void ShenandoahHeap::op_traversal() {
1672   traversal_gc()->concurrent_traversal_collection();
1673 }
1674 
1675 void ShenandoahHeap::op_final_traversal() {
1676   traversal_gc()->final_traversal_collection();
1677 }
1678 
1679 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1680   full_gc()->do_it(cause);
1681   if (UseTLAB) {
1682     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1683     resize_all_tlabs();
1684   }
1685 }
1686 
1687 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1688   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1689   // GC failure via the cancelled_concgc() flag. So, if we detect the failure after
1690   // some phase, we have to upgrade the Degenerated GC to a Full GC.
1691 
1692   clear_cancelled_concgc();
1693 
1694   size_t used_before = used();
1695 
1696   switch (point) {
1697     case _degenerated_evac:
1698       // Not possible to degenerate from here, upgrade to Full GC right away.
1699       cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1700       op_degenerated_fail();
1701       return;
1702 
1703     // The cases below form a Duff's-device-like structure: they describe the actual
1704     // GC cycle, but enter it at different points, depending on which concurrent
1705     // phase had degenerated.
1706 
1707     case _degenerated_traversal:
1708       {
1709         ShenandoahHeapLocker locker(lock());
1710         collection_set()->clear_current_index();
1711         for (size_t i = 0; i < collection_set()->count(); i++) {
1712           ShenandoahHeapRegion* r = collection_set()->next();
1713           r->make_regular_bypass();
1714         }
1715         collection_set()->clear();
1716       }
1717       op_final_traversal();
1718       op_cleanup_traversal();
1719       return;
1720 
1721     case _degenerated_outside_cycle:
1722       if (shenandoahPolicy()->can_do_traversal_gc()) {
1723         // Traversal mode cannot enter the mark-based cycle below, upgrade to Full GC right away.
1724         cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1725         op_degenerated_fail();
1726         return;
1727       }
1728       op_init_mark();
1729       if (cancelled_concgc()) {
1730         op_degenerated_fail();
1731         return;
1732       }
1733 
1734     case _degenerated_mark:
1735       op_final_mark();
1736       if (cancelled_concgc()) {
1737         op_degenerated_fail();
1738         return;
1739       }
1740 
1741       op_cleanup();
1742 
1743       // If the heuristics decided we should do the full cycle, this flag is set,
1744       // and we can do evacuation. Otherwise, this is the shortcut cycle.
1745       if (is_evacuation_in_progress()) {
1746         op_evac();
1747         if (cancelled_concgc()) {
1748           op_degenerated_fail();
1749           return;
1750         }
1751       }
1752 
1753       // If the heuristics decided we should do the full cycle, this flag is set,
1754       // and we need to do update-refs. Otherwise, this is the shortcut cycle.
1755       if (has_forwarded_objects()) {
1756         op_init_updaterefs();
1757         if (cancelled_concgc()) {
1758           op_degenerated_fail();
1759           return;
1760         }
1761       }
1762 
1763     case _degenerated_updaterefs:
1764       if (has_forwarded_objects()) {
1765         op_final_updaterefs();
1766         if (cancelled_concgc()) {
1767           op_degenerated_fail();
1768           return;
1769         }
1770       }
1771 
1772       op_cleanup_bitmaps();
1773       break;
1774 
1775     default:
1776       ShouldNotReachHere();
1777   }
1778 
1779   if (ShenandoahVerify) {
1780     verifier()->verify_after_degenerated();
1781   }
1782 
1783   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1784   // because that probably means the heap is overloaded and/or fragmented.
1785   size_t used_after = used();
1786   size_t difference = (used_before > used_after) ? used_before - used_after : 0;
1787   if (difference < ShenandoahHeapRegion::region_size_words()) {
1788     cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1789     op_degenerated_futile();
1790   }
1791 }
1792 
1793 void ShenandoahHeap::op_degenerated_fail() {
1794   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1795   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1796   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1797 }
1798 
1799 void ShenandoahHeap::op_degenerated_futile() {
1800   log_info(gc)("Degenerated GC had not reclaimed enough, upgrading to Full GC");
1801   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1802   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1803 }
1804 
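     // After a completed marking, exchange the roles of the "next" and "complete"
     // marking bitmaps and their top-at-mark-start arrays: the data just built
     // becomes the complete view used by the rest of the cycle.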
1805 void ShenandoahHeap::swap_mark_bitmaps() {
1806   // Swap bitmaps.
1807   MarkBitMap* tmp1 = _complete_mark_bit_map;
1808   _complete_mark_bit_map = _next_mark_bit_map;
1809   _next_mark_bit_map = tmp1;
1810 
1811   // Swap top-at-mark-start pointers
1812   HeapWord** tmp2 = _complete_top_at_mark_starts;
1813   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1814   _next_top_at_mark_starts = tmp2;
1815 
1816   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1817   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1818   _next_top_at_mark_starts_base = tmp3;
1819 }
1820
1822 void ShenandoahHeap::stop_concurrent_marking() {
1823   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
1824   if (! cancelled_concgc()) {
1825     // Marking completed normally: the "next" bitmap is complete, so swap it in. If refs
1826     // were being updated during marking, they are all updated now; clear the flag.
1827     set_has_forwarded_objects(false);
1828     swap_mark_bitmaps();
1829   }
1830   set_concurrent_mark_in_progress(false);
1831 
1832   LogTarget(Trace, gc, region) lt;
1833   if (lt.is_enabled()) {
1834     ResourceMark rm;
1835     LogStream ls(lt);
1836     ls.print_cr("Regions at stopping the concurrent mark:");
1837     print_heap_regions_on(&ls);
1838   }
1839 }
1840 
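     // Update the global GC state bits and mirror the new value into each Java
     // thread's thread-local copy, which barrier code consults.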
1841 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1842   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1843   _gc_state.set_cond(mask, value);
1844   JavaThread::set_gc_state_all_threads(_gc_state.raw_value());
1845 }
1846 
1847 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1848   set_gc_state_mask(MARKING, in_progress);
1849   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1850 }
1851 
1852 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1853   set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
1854   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1855 }
1856 
1857 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1858   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1859   set_gc_state_mask(EVACUATION, in_progress);
1860 }
1861 
1862 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1863   // Initialize Brooks pointer for the next object
1864   HeapWord* result = obj + BrooksPointer::word_size();
1865   BrooksPointer::initialize(oop(result));
1866   return result;
1867 }
1868 
1869 uint ShenandoahHeap::oop_extra_words() {
1870   return BrooksPointer::word_size();
1871 }
1872 
1873 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1874   _heap(ShenandoahHeap::heap_no_check()) {
1875 }
1876 
1877 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1878   _heap(ShenandoahHeap::heap_no_check()) {
1879 }
1880 
1881 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1882   if (oopDesc::is_null(obj)) {
1883     return false;
1884   }
1885   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1886   shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_mark_in_progress() || _heap->is_concurrent_traversal_in_progress());
1887   return _heap->is_marked_next(obj);
1888 }
1889 
1890 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1891   if (oopDesc::is_null(obj)) {
1892     return false;
1893   }
1894   shenandoah_assert_not_forwarded(NULL, obj);
1895   return _heap->is_marked_next(obj);
1896 }
1897 
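     // Choose the liveness predicate for weak root cleaning: when forwarded objects
     // may exist, references must be resolved through their forwardees first.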
1898 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
1899   return has_forwarded_objects() ?
1900          (BoolObjectClosure*) &_forwarded_is_alive :
1901          (BoolObjectClosure*) &_is_alive;
1902 }
1903 
1904 void ShenandoahHeap::ref_processing_init() {
1905   MemRegion mr = reserved_region();
1906 
1907   _forwarded_is_alive.init(this);
1908   _is_alive.init(this);
1909   assert(_max_workers > 0, "Sanity");
1910 
1911   _ref_processor =
1912     new ReferenceProcessor(mr,    // span
1913                            ParallelRefProcEnabled,  // MT processing
1914                            _max_workers,            // Degree of MT processing
1915                            true,                    // MT discovery
1916                            _max_workers,            // Degree of MT discovery
1917                            false,                   // Reference discovery is not atomic
1918                            NULL);                   // No closure, should be installed before use
1919 
1920   shenandoah_assert_rp_isalive_not_installed();
1921 }
1922
1924 GCTracer* ShenandoahHeap::tracer() {
1925   return shenandoahPolicy()->tracer();
1926 }
1927 
1928 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1929   return _free_set->used();
1930 }
1931 
1932 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
1933   if (try_cancel_concgc()) {
1934     FormatBuffer<> msg("Cancelling concurrent GC: %s", GCCause::to_string(cause));
1935     log_info(gc)("%s", msg.buffer());
1936     Events::log(Thread::current(), "%s", msg.buffer());
1937   }
1938 }
1939 
1940 uint ShenandoahHeap::max_workers() {
1941   return _max_workers;
1942 }
1943 
1944 void ShenandoahHeap::stop() {
1945   // The shutdown sequence should be able to terminate when GC is running.
1946 
1947   // Step 0. Notify policy to disable event recording.
1948   _shenandoah_policy->record_shutdown();
1949 
1950   // Step 1. Notify control thread that we are in shutdown.
1951   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1952   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1953   _control_thread->prepare_for_graceful_shutdown();
1954 
1955   // Step 2. Notify GC workers that we are cancelling GC.
1956   cancel_concgc(GCCause::_shenandoah_stop_vm);
1957 
1958   // Step 3. Wait until GC worker exits normally.
1959   _control_thread->stop();
1960 
1961   // Step 4. Stop String Dedup thread if it is active
1962   if (ShenandoahStringDedup::is_enabled()) {
1963     ShenandoahStringDedup::stop();
1964   }
1965 }
1966 
1967 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1968   ShenandoahPhaseTimings::Phase phase_root =
1969           full_gc ?
1970           ShenandoahPhaseTimings::full_gc_purge :
1971           ShenandoahPhaseTimings::purge;
1972 
1973   ShenandoahPhaseTimings::Phase phase_unload =
1974           full_gc ?
1975           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1976           ShenandoahPhaseTimings::purge_class_unload;
1977 
1978   ShenandoahPhaseTimings::Phase phase_cldg =
1979           full_gc ?
1980           ShenandoahPhaseTimings::full_gc_purge_cldg :
1981           ShenandoahPhaseTimings::purge_cldg;
1982 
1983   ShenandoahPhaseTimings::Phase phase_par =
1984           full_gc ?
1985           ShenandoahPhaseTimings::full_gc_purge_par :
1986           ShenandoahPhaseTimings::purge_par;
1987 
1988   ShenandoahPhaseTimings::Phase phase_par_classes =
1989           full_gc ?
1990           ShenandoahPhaseTimings::full_gc_purge_par_classes :
1991           ShenandoahPhaseTimings::purge_par_classes;
1992 
1993   ShenandoahPhaseTimings::Phase phase_par_codecache =
1994           full_gc ?
1995           ShenandoahPhaseTimings::full_gc_purge_par_codecache :
1996           ShenandoahPhaseTimings::purge_par_codecache;
1997 
1998   ShenandoahPhaseTimings::Phase phase_par_rmt =
1999           full_gc ?
2000           ShenandoahPhaseTimings::full_gc_purge_par_rmt :
2001           ShenandoahPhaseTimings::purge_par_rmt;
2002 
2003   ShenandoahPhaseTimings::Phase phase_par_symbstring =
2004           full_gc ?
2005           ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
2006           ShenandoahPhaseTimings::purge_par_symbstring;
2007 
2008   ShenandoahPhaseTimings::Phase phase_par_sync =
2009           full_gc ?
2010           ShenandoahPhaseTimings::full_gc_purge_par_sync :
2011           ShenandoahPhaseTimings::purge_par_sync;
2012 
2013   ShenandoahGCPhase root_phase(phase_root);
2014 
2015   BoolObjectClosure* is_alive = is_alive_closure();
2016 
2017   bool purged_class;
2018 
2019   // Unload classes and purge SystemDictionary.
2020   {
2021     ShenandoahGCPhase phase(phase_unload);
2022     purged_class = SystemDictionary::do_unloading(is_alive,
2023                                                   gc_timer(),
2024                                                   false /* defer cleaning */);
2025   }
2026 
2027   {
2028     ShenandoahGCPhase phase(phase_par);
2029     uint active = _workers->active_workers();
2030     ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
2031     _workers->run_task(&unlink_task);
2032 
2033     ShenandoahPhaseTimings* p = phase_timings();
2034     ParallelCleaningTimes times = unlink_task.times();
2035 
2036     // "times" reports totals summed across workers, while phase timings expect wall
2037     // time. Divide by active workers for per-worker averages that add up to wall time.
2038     p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
2039     p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
2040     p->record_phase_time(phase_par_rmt,        times.rmt_work_us() / active);
2041     p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
2042     p->record_phase_time(phase_par_sync,       times.sync_us() / active);
2043   }
2044 
2045   if (ShenandoahStringDedup::is_enabled()) {
2046     ShenandoahPhaseTimings::Phase phase_par_string_dedup =
2047             full_gc ?
2048             ShenandoahPhaseTimings::full_gc_purge_par_string_dedup :
2049             ShenandoahPhaseTimings::purge_par_string_dedup;
2050     ShenandoahGCPhase phase(phase_par_string_dedup);
2051     ShenandoahStringDedup::parallel_cleanup();
2052   }
2053
2055   {
2056     ShenandoahGCPhase phase(phase_cldg);
2057     ClassLoaderDataGraph::purge();
2058   }
2059 }
2060 
2061 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2062   set_gc_state_mask(HAS_FORWARDED, cond);
2063 }
2064 
2065 void ShenandoahHeap::set_process_references(bool pr) {
2066   _process_references.set_cond(pr);
2067 }
2068 
2069 void ShenandoahHeap::set_unload_classes(bool uc) {
2070   _unload_classes.set_cond(uc);
2071 }
2072 
2073 bool ShenandoahHeap::process_references() const {
2074   return _process_references.is_set();
2075 }
2076 
2077 bool ShenandoahHeap::unload_classes() const {
2078   return _unload_classes.is_set();
2079 }
2080 
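     // Find the next region usable as a compaction target, skipping over humongous
     // regions.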
2081 // FIXME: This should live in ShenandoahHeapRegionSet.
2082 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2083   size_t region_idx = r->region_number() + 1;
2084   ShenandoahHeapRegion* next = get_region(region_idx);
2085   guarantee(next->region_number() == region_idx, "region number must match");
2086   while (next->is_humongous()) {
2087     region_idx = next->region_number() + 1;
2088     next = get_region(region_idx);
2089     guarantee(next->region_number() == region_idx, "region number must match");
2090   }
2091   return next;
2092 }
2093 
2094 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2095   return _monitoring_support;
2096 }
2097 
2098 MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
2099   return _complete_mark_bit_map;
2100 }
2101 
2102 MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
2103   return _next_mark_bit_map;
2104 }
2105 
2106 address ShenandoahHeap::in_cset_fast_test_addr() {
2107   ShenandoahHeap* heap = ShenandoahHeap::heap();
2108   assert(heap->collection_set() != NULL, "Sanity");
2109   return (address) heap->collection_set()->biased_map_address();
2110 }
2111 
2112 address ShenandoahHeap::cancelled_concgc_addr() {
2113   return (address) ShenandoahHeap::heap()->_cancelled_concgc.addr_of();
2114 }
2115 
2116 address ShenandoahHeap::gc_state_addr() {
2117   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2118 }
2119 
2120 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2121   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2122 }
2123 
2124 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2125   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2126 }
2127 
2128 ShenandoahPacer* ShenandoahHeap::pacer() const {
2129   assert (_pacer != NULL, "sanity");
2130   return _pacer;
2131 }
2132 
2133 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2134   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2135   _next_top_at_mark_starts[index] = addr;
2136 }
2137 
2138 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2139   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2140   return _next_top_at_mark_starts[index];
2141 }
2142 
2143 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2144   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2145   _complete_top_at_mark_starts[index] = addr;
2146 }
2147 
2148 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2149   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2150   return _complete_top_at_mark_starts[index];
2151 }
2152 
2153 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2154   _degenerated_gc_in_progress.set_cond(in_progress);
2155 }
2156 
2157 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2158   _full_gc_in_progress.set_cond(in_progress);
2159 }
2160 
2161 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2162   assert (is_full_gc_in_progress(), "should be");
2163   _full_gc_move_in_progress.set_cond(in_progress);
2164 }
2165 
2166 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2167   set_gc_state_mask(UPDATEREFS, in_progress);
2168 }
2169 
2170 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2171   ShenandoahCodeRoots::add_nmethod(nm);
2172 }
2173 
2174 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2175   ShenandoahCodeRoots::remove_nmethod(nm);
2176 }
2177 
2178 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2179   o = BarrierSet::barrier_set()->write_barrier(o);
2180   ShenandoahHeapLocker locker(lock());
2181   heap_region_containing(o)->make_pinned();
2182   return o;
2183 }
2184 
2185 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2186   o = BarrierSet::barrier_set()->read_barrier(o);
2187   ShenandoahHeapLocker locker(lock());
2188   heap_region_containing(o)->make_unpinned();
2189 }
2190 
2191 GCTimer* ShenandoahHeap::gc_timer() const {
2192   return _gc_timer;
2193 }
2194 
2195 #ifdef ASSERT
2196 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2197   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2198 
2199   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2200     if (UseDynamicNumberOfGCThreads ||
2201         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2202       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2203     } else {
2204       // Use ParallelGCThreads inside safepoints
2205       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2206     }
2207   } else {
2208     if (UseDynamicNumberOfGCThreads ||
2209         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2210       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2211     } else {
2212       // Use ConcGCThreads outside safepoints
2213       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2214     }
2215   }
2216 }
2217 #endif
2218 
2219 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
2220   return _connection_matrix;
2221 }
2222 
2223 ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() {
2224   return _traversal_gc;
2225 }
2226 
2227 ShenandoahVerifier* ShenandoahHeap::verifier() {
2228   guarantee(ShenandoahVerify, "Should be enabled");
2229   assert (_verifier != NULL, "sanity");
2230   return _verifier;
2231 }
2232 
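     // Parallel task for updating heap references. Workers claim regions from a
     // shared iterator: for collection-set regions, only the stale complete-mark
     // bitmap range is cleared (their objects have been evacuated); other active
     // regions have their marked objects' reference fields updated through the
     // closure, bailing out early when the GC is cancelled.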
2233 template<class T>
2234 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2235 private:
2236   T cl;
2237   ShenandoahHeap* _heap;
2238   ShenandoahRegionIterator* _regions;
2239   bool _concurrent;
2240 public:
2241   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2242     AbstractGangTask("Concurrent Update References Task"),
2243     cl(T()),
2244     _heap(ShenandoahHeap::heap()),
2245     _regions(regions),
2246     _concurrent(concurrent) {
2247   }
2248 
2249   void work(uint worker_id) {
2250     SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
2251     ShenandoahHeapRegion* r = _regions->next();
2252     while (r != NULL) {
2253       if (_heap->in_collection_set(r)) {
2254         HeapWord* bottom = r->bottom();
2255         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2256         if (top > bottom) {
2257           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2258         }
2259       } else {
2260         if (r->is_active()) {
2261           _heap->marked_object_oop_safe_iterate(r, &cl);
2262           if (ShenandoahPacing) {
2263             _heap->pacer()->report_updaterefs(r->get_live_data_words());
2264           }
2265         }
2266       }
2267       if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
2268         return;
2269       }
2270       r = _regions->next();
2271     }
2272   }
2273 };
2274 
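     // Run the parallel update-refs task; the closure type additionally maintains
     // the connection matrix when UseShenandoahMatrix is enabled.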
2275 void ShenandoahHeap::update_heap_references(bool concurrent) {
2276   if (UseShenandoahMatrix) {
2277     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(&_update_refs_iterator, concurrent);
2278     workers()->run_task(&task);
2279   } else {
2280     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2281     workers()->run_task(&task);
2282   }
2283 }
2284 
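     // Safepoint that flips the heap from evacuation to the update-refs phase:
     // retires GCLABs, makes TLABs parsable, records per-region safe iteration
     // limits, and resets the shared iterator that workers will claim regions from.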
2285 void ShenandoahHeap::op_init_updaterefs() {
2286   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2287 
2288   if (ShenandoahVerify) {
2289     verifier()->verify_before_updaterefs();
2290   }
2291 
2292   accumulate_statistics_all_gclabs();
2293   set_evacuation_in_progress(false);
2294   set_update_refs_in_progress(true);
2295   make_tlabs_parsable(true);
2296   if (UseShenandoahMatrix) {
2297     connection_matrix()->clear_all();
2298   }
2299   for (size_t i = 0; i < num_regions(); i++) {
2300     ShenandoahHeapRegion* r = get_region(i);
2301     r->set_concurrent_iteration_safe_limit(r->top());
2302   }
2303 
2304   // Reset iterator.
2305   _update_refs_iterator = ShenandoahRegionIterator();
2306 
2307   if (ShenandoahPacing) {
2308     pacer()->setup_for_updaterefs();
2309   }
2310 }
2311 
2312 void ShenandoahHeap::op_final_updaterefs() {
2313   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2314 
2315   // Check if there is left-over work, and finish it
2316   if (_update_refs_iterator.has_next()) {
2317     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2318 
2319     // Finish updating references where we left off.
2320     clear_cancelled_concgc();
2321     update_heap_references(false);
2322   }
2323 
2324   // Clear cancelled conc GC, if set. On the cancellation path, the block above has
2325   // already handled everything; on degenerated paths, the flag would not be set anyway.
2326   if (cancelled_concgc()) {
2327     clear_cancelled_concgc();
2328   }
2329   assert(!cancelled_concgc(), "Should have been done right before");
2330 
2331   concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2332 
2333   // Allocations might have happened before we STWed here, record peak:
2334   shenandoahPolicy()->record_peak_occupancy();
2335 
2336   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2337 
2338   trash_cset_regions();
2339   set_has_forwarded_objects(false);
2340 
2341   if (ShenandoahVerify) {
2342     verifier()->verify_after_updaterefs();
2343   }
2344 
2345   {
2346     ShenandoahHeapLocker locker(lock());
2347     _free_set->rebuild();
2348   }
2349 
2350   set_update_refs_in_progress(false);
2351 }
2352 
2353 void ShenandoahHeap::set_alloc_seq_gc_start() {
2354   // Take next number, the start seq number is inclusive
2355   _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::seqnum_current_alloc() + 1;
2356 }
2357 
2358 void ShenandoahHeap::set_alloc_seq_gc_end() {
2359   // Take current number, the end seq number is also inclusive
2360   _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::seqnum_current_alloc();
2361 }
2362
2364 #ifdef ASSERT
2365 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2366   _lock.assert_owned_by_current_thread();
2367 }
2368 
2369 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2370   _lock.assert_not_owned_by_current_thread();
2371 }
2372 
2373 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2374   _lock.assert_owned_by_current_thread_or_safepoint();
2375 }
2376 #endif
2377 
2378 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2379   print_on(st);
2380   print_heap_regions_on(st);
2381 }
2382 
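     // Marking bitmaps are committed in slices, each covering a group of
     // _bitmap_regions_per_slice regions. A slice may only be uncommitted when no
     // region of its group is committed; this checks the other regions in the group.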
2383 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2384   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2385 
2386   size_t regions_from = _bitmap_regions_per_slice * slice;
2387   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2388   for (size_t g = regions_from; g < regions_to; g++) {
2389     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2390     if (skip_self && g == r->region_number()) continue;
2391     if (get_region(g)->is_committed()) {
2392       return true;
2393     }
2394   }
2395   return false;
2396 }
2397 
2398 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2399   assert_heaplock_owned_by_current_thread();
2400 
2401   if (is_bitmap_slice_committed(r, true)) {
2402     // Some other region from the group is already committed, meaning the bitmap
2403     // slice is already committed too; exit right away.
2404     return true;
2405   }
2406 
2407   // Commit the bitmap slice:
2408   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2409   size_t off = _bitmap_bytes_per_slice * slice;
2410   size_t len = _bitmap_bytes_per_slice;
2411   if (!os::commit_memory((char*)_bitmap0_region.start() + off, len, false)) {
2412     return false;
2413   }
2414   if (!os::commit_memory((char*)_bitmap1_region.start() + off, len, false)) {
2415     return false;
2416   }
2417   return true;
2418 }
2419 
2420 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2421   assert_heaplock_owned_by_current_thread();
2422 
2423   if (is_bitmap_slice_committed(r, true)) {
2424     // Some other region from the group is still committed, meaning the bitmap
2425     // slice should stay committed; exit right away.
2426     return true;
2427   }
2428 
2429   // Uncommit the bitmap slice:
2430   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2431   size_t off = _bitmap_bytes_per_slice * slice;
2432   size_t len = _bitmap_bytes_per_slice;
2433   if (!os::uncommit_memory((char*)_bitmap0_region.start() + off, len)) {
2434     return false;
2435   }
2436   if (!os::uncommit_memory((char*)_bitmap1_region.start() + off, len)) {
2437     return false;
2438   }
2439   return true;
2440 }
2441 
2442 bool ShenandoahHeap::idle_bitmap_slice(ShenandoahHeapRegion *r) {
2443   assert_heaplock_owned_by_current_thread();
2444   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2445 
2446   if (is_bitmap_slice_committed(r, true)) {
2447     // Some other region from the group is still committed, meaning the bitmap
2448     // slice should stay committed; exit right away.
2449     return true;
2450   }
2451 
2452   // Idle the bitmap slice:
2453   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2454   size_t off = _bitmap_bytes_per_slice * slice;
2455   size_t len = _bitmap_bytes_per_slice;
2456   if (!os::idle_memory((char*)_bitmap0_region.start() + off, len)) {
2457     return false;
2458   }
2459   if (!os::idle_memory((char*)_bitmap1_region.start() + off, len)) {
2460     return false;
2461   }
2462   return true;
2463 }
2464 
2465 void ShenandoahHeap::activate_bitmap_slice(ShenandoahHeapRegion* r) {
2466   assert_heaplock_owned_by_current_thread();
2467   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2468   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2469   size_t off = _bitmap_bytes_per_slice * slice;
2470   size_t len = _bitmap_bytes_per_slice;
2471   os::activate_memory((char*)_bitmap0_region.start() + off, len);
2472   os::activate_memory((char*)_bitmap1_region.start() + off, len);
2473 }
2474 
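     // With suspendible workers (or string deduplication), concurrent GC threads
     // must be synchronized before a safepoint begins and released when it ends.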
2475 void ShenandoahHeap::safepoint_synchronize_begin() {
2476   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2477     SuspendibleThreadSet::synchronize();
2478   }
2479 }
2480 
2481 void ShenandoahHeap::safepoint_synchronize_end() {
2482   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2483     SuspendibleThreadSet::desynchronize();
2484   }
2485 }
2486 
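     // GC pause entry points are layered: vmop_entry_* runs on the requesting thread
     // and schedules a VM operation, which invokes the matching entry_* at a
     // safepoint; entry_* sets up timing, logging and workers, then calls op_* to do
     // the actual work. Concurrent phases reach their entry_* directly from the
     // control thread.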
2487 void ShenandoahHeap::vmop_entry_init_mark() {
2488   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2489   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2490   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2491 
2492   try_inject_alloc_failure();
2493   VM_ShenandoahInitMark op;
2494   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2495 }
2496 
2497 void ShenandoahHeap::vmop_entry_final_mark() {
2498   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2499   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2500   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2501 
2502   try_inject_alloc_failure();
2503   VM_ShenandoahFinalMarkStartEvac op;
2504   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2505 }
2506 
2507 void ShenandoahHeap::vmop_entry_final_evac() {
2508   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2509   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2510   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2511 
2512   VM_ShenandoahFinalEvac op;
2513   VMThread::execute(&op); // jump to entry_final_evac under safepoint
2514 }
2515 
2516 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2517   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2518   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2519   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2520 
2521   try_inject_alloc_failure();
2522   VM_ShenandoahInitUpdateRefs op;
2523   VMThread::execute(&op);
2524 }
2525 
2526 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2527   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2528   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2529   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2530 
2531   try_inject_alloc_failure();
2532   VM_ShenandoahFinalUpdateRefs op;
2533   VMThread::execute(&op);
2534 }
2535 
2536 void ShenandoahHeap::vmop_entry_init_traversal() {
2537   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2538   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2539   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2540 
2541   try_inject_alloc_failure();
2542   VM_ShenandoahInitTraversalGC op;
2543   VMThread::execute(&op);
2544 }
2545 
2546 void ShenandoahHeap::vmop_entry_final_traversal() {
2547   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2548   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2549   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2550 
2551   try_inject_alloc_failure();
2552   VM_ShenandoahFinalTraversalGC op;
2553   VMThread::execute(&op);
2554 }
2555 
2556 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2557   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2558   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2559   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2560 
2561   try_inject_alloc_failure();
2562   VM_ShenandoahFullGC op(cause);
2563   VMThread::execute(&op);
2564 }
2565 
2566 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2567   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2568   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2569   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2570 
2571   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2572   VMThread::execute(&degenerated_gc);
2573 }
2574 
2575 void ShenandoahHeap::entry_init_mark() {
2576   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2577   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2578 
2579   FormatBuffer<> msg("Pause Init Mark%s%s%s",
2580                      has_forwarded_objects() ? " (update refs)"    : "",
2581                      process_references() ?    " (process refs)"   : "",
2582                      unload_classes() ?        " (unload classes)" : "");
2583   GCTraceTime(Info, gc) time(msg, gc_timer());
2584   EventMark em("%s", msg.buffer());
2585 
2586   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_init_marking());
2587 
2588   op_init_mark();
2589 }
2590 
2591 void ShenandoahHeap::entry_final_mark() {
2592   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2593   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2594 
2595   FormatBuffer<> msg("Pause Final Mark%s%s%s",
2596                      has_forwarded_objects() ? " (update refs)"    : "",
2597                      process_references() ?    " (process refs)"   : "",
2598                      unload_classes() ?        " (unload classes)" : "");
2599   GCTraceTime(Info, gc) time(msg, gc_timer());
2600   EventMark em("%s", msg.buffer());
2601 
2602   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_marking());
2603 
2604   op_final_mark();
2605 }
2606 
2607 void ShenandoahHeap::entry_final_evac() {
2608   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2609   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2610 
2611   FormatBuffer<> msg("Pause Final Evac");
2612   GCTraceTime(Info, gc) time(msg, gc_timer());
2613   EventMark em("%s", msg.buffer());
2614 
2615   op_final_evac();
2616 }
2617 
2618 void ShenandoahHeap::entry_init_updaterefs() {
2619   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2620   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2621 
2622   static const char* msg = "Pause Init Update Refs";
2623   GCTraceTime(Info, gc) time(msg, gc_timer());
2624   EventMark em("%s", msg);
2625 
2626   // No workers used in this phase, no setup required
2627 
2628   op_init_updaterefs();
2629 }
2630 
2631 void ShenandoahHeap::entry_final_updaterefs() {
2632   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2633   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2634 
2635   static const char* msg = "Pause Final Update Refs";
2636   GCTraceTime(Info, gc) time(msg, gc_timer());
2637   EventMark em("%s", msg);
2638 
2639   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_update_ref());
2640 
2641   op_final_updaterefs();
2642 }
2643 
2644 void ShenandoahHeap::entry_init_traversal() {
2645   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2646   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2647 
2648   static const char* msg = "Pause Init Traversal";
2649   GCTraceTime(Info, gc) time(msg, gc_timer());
2650   EventMark em("%s", msg);
2651 
2652   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2653 
2654   op_init_traversal();
2655 }
2656 
2657 void ShenandoahHeap::entry_final_traversal() {
2658   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2659   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2660 
2661   static const char* msg = "Pause Final Traversal";
2662   GCTraceTime(Info, gc) time(msg, gc_timer());
2663   EventMark em("%s", msg);
2664 
2665   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2666 
2667   op_final_traversal();
2668 }
2669 
2670 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2671   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2672   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2673 
2674   static const char* msg = "Pause Full";
2675   GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2676   EventMark em("%s", msg);
2677 
2678   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_fullgc());
2679 
2680   op_full(cause);
2681 }
2682 
2683 void ShenandoahHeap::entry_degenerated(int point) {
2684   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2685   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2686 
2687   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2688   FormatBuffer<> msg("Pause Degenerated GC (%s)", degen_point_to_string(dpoint));
2689   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2690   EventMark em("%s", msg.buffer());
2691 
2692   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated());
2693 
2694   set_degenerated_gc_in_progress(true);
2695   op_degenerated(dpoint);
2696   set_degenerated_gc_in_progress(false);
2697 }
2698 
2699 void ShenandoahHeap::entry_mark() {
2700   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2701 
2702   FormatBuffer<> msg("Concurrent marking%s%s%s",
2703                      has_forwarded_objects() ? " (update refs)"    : "",
2704                      process_references() ?    " (process refs)"   : "",
2705                      unload_classes() ?        " (unload classes)" : "");
2706   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2707   EventMark em("%s", msg.buffer());
2708 
2709   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_marking());
2710 
2711   try_inject_alloc_failure();
2712   op_mark();
2713 }
2714 
2715 void ShenandoahHeap::entry_evac() {
2716   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2717   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2718 
2719   static const char* msg = "Concurrent evacuation";
2720   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2721   EventMark em("%s", msg);
2722 
2723   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_evac());
2724 
2725   try_inject_alloc_failure();
2726   op_evac();
2727 }
2728 
2729 void ShenandoahHeap::entry_updaterefs() {
2730   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2731 
2732   static const char* msg = "Concurrent update references";
2733   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2734   EventMark em("%s", msg);
2735 
2736   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref());
2737 
2738   try_inject_alloc_failure();
2739   op_updaterefs();
2740 }

2741 void ShenandoahHeap::entry_cleanup() {
2742   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2743 
2744   static const char* msg = "Concurrent cleanup";
2745   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2746   EventMark em("%s", msg);
2747 
2748   // This phase does not use workers, no need for setup
2749 
2750   try_inject_alloc_failure();
2751   op_cleanup();
2752 }
2753 
2754 void ShenandoahHeap::entry_cleanup_traversal() {
2755   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2756 
2757   static const char* msg = "Concurrent cleanup";
2758   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2759   EventMark em("%s", msg);
2760 
2761   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2762 
2763   try_inject_alloc_failure();
2764   op_cleanup_traversal();
2765 }
2766 
2767 void ShenandoahHeap::entry_cleanup_bitmaps() {
2768   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2769 
2770   static const char* msg = "Concurrent cleanup";
2771   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2772   EventMark em("%s", msg);
2773 
2774   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup());
2775 
2776   try_inject_alloc_failure();
2777   op_cleanup_bitmaps();
2778 }
2779 
2780 void ShenandoahHeap::entry_preclean() {
2781   if (ShenandoahPreclean && process_references()) {
2782     static const char* msg = "Concurrent precleaning";
2783     GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2784     EventMark em("%s", msg);
2785 
2786     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2787 
2788     ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_preclean());
2789 
2790     try_inject_alloc_failure();
2791     op_preclean();
2792   }
2793 }
2794 
2795 void ShenandoahHeap::entry_traversal() {
2796   static const char* msg = "Concurrent traversal";
2797   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2798   EventMark em("%s", msg);
2799 
2800   TraceCollectorStats tcs(is_minor_gc() ? monitoring_support()->partial_collection_counters()
2801                                         : monitoring_support()->concurrent_collection_counters());
2802 
2803   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2804 
2805   try_inject_alloc_failure();
2806   op_traversal();
2807 }
2808 
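     // Diagnostic support for ShenandoahAllocFailureALot: in roughly 5% of calls,
     // raise the allocation failure flag and nap briefly, giving the cancellation
     // machinery a chance to trigger, so that failure paths get exercised in testing.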
2809 void ShenandoahHeap::try_inject_alloc_failure() {
2810   if (ShenandoahAllocFailureALot && !cancelled_concgc() && ((os::random() % 1000) > 950)) {
2811     _inject_alloc_failure.set();
2812     os::naked_short_sleep(1);
2813     if (cancelled_concgc()) {
2814       log_info(gc)("Allocation failure was successfully injected");
2815     }
2816   }
2817 }
2818 
2819 bool ShenandoahHeap::should_inject_alloc_failure() {
2820   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2821 }
2822 
2823 void ShenandoahHeap::initialize_serviceability() {
2824   _memory_pool = new ShenandoahMemoryPool(this);
2825   _cycle_memory_manager.add_pool(_memory_pool);
2826   _stw_memory_manager.add_pool(_memory_pool);
2827 }
2828 
2829 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2830   GrowableArray<GCMemoryManager*> memory_managers(2);
2831   memory_managers.append(&_cycle_memory_manager);
2832   memory_managers.append(&_stw_memory_manager);
2833   return memory_managers;
2834 }
2835 
2836 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2837   GrowableArray<MemoryPool*> memory_pools(1);
2838   memory_pools.append(_memory_pool);
2839   return memory_pools;
2840 }
2841 
2842 void ShenandoahHeap::enter_evacuation() {
2843   _oom_evac_handler.enter_evacuation();
2844 }
2845 
2846 void ShenandoahHeap::leave_evacuation() {
2847   _oom_evac_handler.leave_evacuation();
2848 }
2849 
2850 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2851   _index(0),
2852   _heap(ShenandoahHeap::heap()) {}
2853 
2854 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2855   _index(0),
2856   _heap(heap) {}
2857 
2858 ShenandoahRegionIterator& ShenandoahRegionIterator::operator=(const ShenandoahRegionIterator& o) {
2859   _index = o._index;
2860   assert(_heap == o._heap, "must be same");
2861   return *this;
2862 }
2863 
2864 bool ShenandoahRegionIterator::has_next() const {
2865   return _index < _heap->num_regions();
2866 }
2867 
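     // Closure-reference variant of region iteration: walks all regions in address
     // order via a fresh iterator, stopping early when the closure returns true.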
2868 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure& cl) const {
2869   ShenandoahRegionIterator regions;
2870   ShenandoahHeapRegion* r = regions.next();
2871   while (r != NULL) {
2872     if (cl.heap_region_do(r)) {
2873       break;
2874     }
2875     r = regions.next();
2876   }
2877 }
2878 
2879 bool ShenandoahHeap::is_minor_gc() const {
2880   return _gc_cycle_mode.get() == MINOR;
2881 }
2882 
2883 bool ShenandoahHeap::is_major_gc() const {
2884   return _gc_cycle_mode.get() == MAJOR;
2885 }
2886 
2887 void ShenandoahHeap::set_cycle_mode(GCCycleMode gc_cycle_mode) {
2888   _gc_cycle_mode.set(gc_cycle_mode);
2889 }
2890 
2891 char ShenandoahHeap::gc_state() {
2892   return _gc_state.raw_value();
2893 }
2894 
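     // Attach a fresh GCLAB to the thread, sized by OldPLABSize for Java threads
     // and by YoungPLABSize for other (GC worker) threads.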
2895 void ShenandoahHeap::initialize_gclab(Thread* thread) {
2896   if (thread->is_Java_thread()) {
2897     thread->set_gclab(new PLAB(OldPLABSize));
2898   } else {
2899     thread->set_gclab(new PLAB(YoungPLABSize));
2900   }
2901 }
2902 
2903 void ShenandoahHeap::finalize_mutator_gclab(Thread* thread) {
2904   thread->gclab()->flush_and_retire_stats(ShenandoahHeap::heap()->mutator_gclab_stats());
2905   delete thread->gclab();
2906   thread->set_gclab(NULL);
2907 }