1 /*
   2  * Copyright (c) 2013, 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/parallelCleaning.hpp"
  30 
  31 #include "gc/shenandoah/brooksPointer.hpp"
  32 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahControlThread.hpp"
  39 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  44 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  45 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  46 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  47 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  48 #include "gc/shenandoah/shenandoahPacer.hpp"
  49 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  50 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  51 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  52 #include "gc/shenandoah/shenandoahUtils.hpp"
  53 #include "gc/shenandoah/shenandoahVerifier.hpp"
  54 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  55 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  56 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  57 
  58 #include "runtime/vmThread.hpp"
  59 #include "services/mallocTracker.hpp"
  60 
  61 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  62 
  63 #ifdef ASSERT
  64 template <class T>
  65 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  66   T o = RawAccess<>::oop_load(p);
  67   if (! CompressedOops::is_null(o)) {
  68     oop obj = CompressedOops::decode_not_null(o);
  69     shenandoah_assert_not_forwarded(p, obj);
  70   }
  71 }
  72 
  73 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
  74 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
  75 #endif
  76 
  77 const char* ShenandoahHeap::name() const {
  78   return "Shenandoah";
  79 }
  80 
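// Pre-touches committed heap regions and the matching slices of both marking bitmaps in parallel.
// Touching a region and its bitmap slices from the same worker keeps the backing memory local on NUMA systems.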
  81 class ShenandoahPretouchTask : public AbstractGangTask {
  82 private:
  83   ShenandoahRegionIterator _regions;
  84   const size_t _bitmap_size;
  85   const size_t _page_size;
  86   char* _bitmap0_base;
  87   char* _bitmap1_base;
  88 public:
  89   ShenandoahPretouchTask(char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
  90                          size_t page_size) :
  91     AbstractGangTask("Shenandoah PreTouch"),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {}
  96 
  97   virtual void work(uint worker_id) {
  98     ShenandoahHeapRegion* r = _regions.next();
  99     while (r != NULL) {
 100       log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 101                           r->region_number(), p2i(r->bottom()), p2i(r->end()));
 102       os::pretouch_memory(r->bottom(), r->end(), _page_size);
 103 
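      // Each region maps to a contiguous chunk of the marking bitmaps; compute its byte range
      // from the region index so the bitmap slices are pre-touched by the same worker as the region.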
 104       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 105       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 107 
 108       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 109                           r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
 110       os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);
 111 
 112       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 113                           r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
 114       os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);
 115 
 116       r = _regions.next();
 117     }
 118   }
 119 };
 120 
 121 jint ShenandoahHeap::initialize() {
 122 
 123   BrooksPointer::initial_checks();
 124 
 125   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 126   size_t max_byte_size = collector_policy()->max_heap_byte_size();
 127   size_t heap_alignment = collector_policy()->heap_alignment();
 128 
 129   if (ShenandoahAlwaysPreTouch) {
    // Pre-touch requires the entire heap to be committed right away.
 131     init_byte_size = max_byte_size;
 132   }
 133 
 134   Universe::check_alignment(max_byte_size,
 135                             ShenandoahHeapRegion::region_size_bytes(),
 136                             "shenandoah heap");
 137   Universe::check_alignment(init_byte_size,
 138                             ShenandoahHeapRegion::region_size_bytes(),
 139                             "shenandoah heap");
 140 
 141   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
 142                                                  heap_alignment);
 143   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 144 
 145   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 146   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
 147 
 148   _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
 149   size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
 150   _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
 151   _committed = _initial_size;
 152 
 153   log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
 154   if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
 155     vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
 156   }
 157 
 158   size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
 159   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 160 
 161   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 162   _free_set = new ShenandoahFreeSet(this, _num_regions);
 163 
 164   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
 165 
 166   _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 167   _next_top_at_mark_starts = _next_top_at_mark_starts_base -
 168                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 169 
 170   _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 171   _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
 172                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 173 
 174   if (ShenandoahPacing) {
 175     _pacer = new ShenandoahPacer(this);
 176     _pacer->setup_for_idle();
 177   } else {
 178     _pacer = NULL;
 179   }
 180 
 181   {
 182     ShenandoahHeapLocker locker(lock());
 183     for (size_t i = 0; i < _num_regions; i++) {
 184       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
 185                                                          (HeapWord*) pgc_rs.base() + reg_size_words * i,
 186                                                          reg_size_words,
 187                                                          i,
 188                                                          i < num_committed_regions);
 189 
 190       _complete_top_at_mark_starts_base[i] = r->bottom();
 191       _next_top_at_mark_starts_base[i] = r->bottom();
 192       _regions[i] = r;
 193       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 194     }
 195 
 196     _free_set->rebuild();
 197   }
 198 
 199   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));
 201 
  // The call below uses SATB machinery that currently lives in G1, but probably
  // belongs in a shared location.
 204   ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 205                                                SATB_Q_FL_lock,
 206                                                20 /*G1SATBProcessCompletedThreshold */,
 207                                                Shared_SATB_Q_lock);
 208 
  // Reserve space for the two marking bitmaps (complete and next).
 210   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 211   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 212   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 213   _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
 214 
 215   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 216 
 217   guarantee(bitmap_bytes_per_region != 0,
 218             "Bitmap bytes per region should not be zero");
 219   guarantee(is_power_of_2(bitmap_bytes_per_region),
 220             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 221 
 222   if (bitmap_page_size > bitmap_bytes_per_region) {
 223     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 224     _bitmap_bytes_per_slice = bitmap_page_size;
 225   } else {
 226     _bitmap_regions_per_slice = 1;
 227     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 228   }
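  // Example, assuming 1 MB regions and one marking-bitmap byte per 64 heap bytes (one bit per
  // 64-bit heap word): each region needs 16 KB of bitmap, so with 4 KB pages every slice covers
  // exactly one region, while with 2 MB large pages one slice spans 128 regions.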
 229 
 230   guarantee(_bitmap_regions_per_slice >= 1,
 231             "Should have at least one region per slice: " SIZE_FORMAT,
 232             _bitmap_regions_per_slice);
 233 
 234   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 235             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 236             _bitmap_bytes_per_slice, bitmap_page_size);
 237 
 238   ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
 239   MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
 240   _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
 241 
 242   ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
 243   MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
 244   _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
 245 
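  // Commit only the bitmap slices that cover the initially committed regions, rounded up to whole
  // slices; the rest of the bitmap space is committed on demand as regions get committed.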
 246   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 247                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 248   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 249   os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
 250                             "couldn't allocate initial bitmap");
 251   os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
 252                             "couldn't allocate initial bitmap");
 253 
 254   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 255 
 256   if (ShenandoahVerify) {
 257     ReservedSpace verify_bitmap(_bitmap_size, page_size);
 258     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
 259                               "couldn't allocate verification bitmap");
 260     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 261     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 262     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 263     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 264   }
 265 
 266   if (ShenandoahAlwaysPreTouch) {
 267     assert (!AlwaysPreTouch, "Should have been overridden");
 268 
 269     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 270     // before initialize() below zeroes it with initializing thread. For any given region,
 271     // we touch the region and the corresponding bitmaps from the same thread.
 272 
 273     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 274                        _num_regions, page_size);
 275     ShenandoahPretouchTask cl(bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
 276     _workers->run_task(&cl);
 277   }
 278 
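  // Two marking bitmaps: "complete" holds the result of the last finished marking cycle, while
  // "next" is populated by the in-progress/upcoming cycle; the roles are swapped as cycles complete.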
 279   _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
 280   _complete_mark_bit_map = &_mark_bit_map0;
 281 
 282   _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
 283   _next_mark_bit_map = &_mark_bit_map1;
 284 
 285   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 286   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 287   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 288   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 289   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 290 
 291   if (UseShenandoahMatrix) {
 292     _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
 293   } else {
 294     _connection_matrix = NULL;
 295   }
 296 
 297   _traversal_gc = _shenandoah_policy->can_do_traversal_gc() ?
 298                 new ShenandoahTraversalGC(this, _num_regions) :
 299                 NULL;
 300 
 301   _monitoring_support = new ShenandoahMonitoringSupport(this);
 302 
 303   _phase_timings = new ShenandoahPhaseTimings();
 304 
 305   if (ShenandoahAllocationTrace) {
 306     _alloc_tracker = new ShenandoahAllocTracker();
 307   }
 308 
 309   ShenandoahStringDedup::initialize();
 310 
 311   _control_thread = new ShenandoahControlThread();
 312 
 313   ShenandoahCodeRoots::initialize();
 314 
 315   LogTarget(Trace, gc, region) lt;
 316   if (lt.is_enabled()) {
 317     ResourceMark rm;
 318     LogStream ls(lt);
 319     log_trace(gc, region)("All Regions");
 320     print_heap_regions_on(&ls);
 321     log_trace(gc, region)("Free Regions");
 322     _free_set->print_on(&ls);
 323   }
 324 
 325   log_info(gc, init)("Safepointing mechanism: %s",
 326                      SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
 327                      (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
 328 
 329   return JNI_OK;
 330 }
 331 
 332 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 333   CollectedHeap(),
 334   _shenandoah_policy(policy),
 335   _soft_ref_policy(),
 336   _regions(NULL),
 337   _free_set(NULL),
 338   _collection_set(NULL),
 339   _update_refs_iterator(this),
 340   _bytes_allocated_since_gc_start(0),
 341   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 342   _ref_processor(NULL),
 343   _next_top_at_mark_starts(NULL),
 344   _next_top_at_mark_starts_base(NULL),
 345   _complete_top_at_mark_starts(NULL),
 346   _complete_top_at_mark_starts_base(NULL),
 347   _mark_bit_map0(),
 348   _mark_bit_map1(),
 349   _aux_bit_map(),
 350   _connection_matrix(NULL),
 351   _verifier(NULL),
 352   _pacer(NULL),
 353   _used_at_last_gc(0),
 354   _alloc_seq_at_last_gc_start(0),
 355   _alloc_seq_at_last_gc_end(0),
 356   _safepoint_workers(NULL),
 357   _gc_cycle_mode(),
 358 #ifdef ASSERT
 359   _heap_expansion_count(0),
 360 #endif
 361   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 362   _phase_timings(NULL),
 363   _alloc_tracker(NULL),
 364   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 365   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 366   _memory_pool(NULL)
 367 {
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
 370   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 371 
 372   _scm = new ShenandoahConcurrentMark();
 373   _full_gc = new ShenandoahMarkCompact();
 374   _used = 0;
 375 
 376   _max_workers = MAX2(_max_workers, 1U);
 377   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 378                             /* are_GC_task_threads */true,
 379                             /* are_ConcurrentGC_threads */false);
 380   if (_workers == NULL) {
 381     vm_exit_during_initialization("Failed necessary allocation.");
 382   } else {
 383     _workers->initialize_workers();
 384   }
 385 
 386   if (ParallelSafepointCleanupThreads > 1) {
 387     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 388                                                 ParallelSafepointCleanupThreads,
 389                                                 false, false);
 390     _safepoint_workers->initialize_workers();
 391   }
 392 }
 393 
 394 class ShenandoahResetNextBitmapTask : public AbstractGangTask {
 395 private:
 396   ShenandoahRegionIterator _regions;
 397 
 398 public:
 399   ShenandoahResetNextBitmapTask() :
 400     AbstractGangTask("Parallel Reset Bitmap Task") {}
 401 
 402   void work(uint worker_id) {
 403     ShenandoahHeapRegion* region = _regions.next();
 404     ShenandoahHeap* heap = ShenandoahHeap::heap();
 405     while (region != NULL) {
 406       if (heap->is_bitmap_slice_committed(region)) {
 407         HeapWord* bottom = region->bottom();
        HeapWord* top = heap->next_top_at_mark_start(bottom);
 409         if (top > bottom) {
 410           heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 411         }
 412         assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
 413       }
 414       region = _regions.next();
 415     }
 416   }
 417 };
 418 
 419 void ShenandoahHeap::reset_next_mark_bitmap() {
 420   assert_gc_workers(_workers->active_workers());
 421 
 422   ShenandoahResetNextBitmapTask task;
 423   _workers->run_task(&task);
 424 }
 425 
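// Used by the traversal GC: for regions in the traversal set, the freshly built "next" bitmap
// content becomes the new "complete" marking information (copied over, TAMS adjusted), and the
// "next" bitmap is then cleared so it is ready for the following cycle.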
 426 class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask {
 427 private:
 428   ShenandoahRegionIterator _regions;
 429 
 430 public:
 431   ShenandoahResetNextBitmapTraversalTask() :
 432     AbstractGangTask("Parallel Reset Bitmap Task for Traversal") {}
 433 
 434   void work(uint worker_id) {
 435     ShenandoahHeap* heap = ShenandoahHeap::heap();
 436     ShenandoahHeapRegionSet* traversal_set = heap->traversal_gc()->traversal_set();
 437     ShenandoahHeapRegion* region = _regions.next();
 438     while (region != NULL) {
 439       if (heap->is_bitmap_slice_committed(region)) {
 440         if (traversal_set->is_in(region) && !region->is_trash()) {
 441           ShenandoahHeapLocker locker(heap->lock());
 442           HeapWord* bottom = region->bottom();
 443           HeapWord* top = heap->next_top_at_mark_start(bottom);
 444           assert(top <= region->top(),
                 "TAMS must be smaller than or equal to top: TAMS: " PTR_FORMAT ", top: " PTR_FORMAT,
 446                  p2i(top), p2i(region->top()));
 447           if (top > bottom) {
 448             heap->complete_mark_bit_map()->copy_from(heap->next_mark_bit_map(), MemRegion(bottom, top));
 449             heap->set_complete_top_at_mark_start(bottom, top);
 450             heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 451             heap->set_next_top_at_mark_start(bottom, bottom);
 452           }
 453         }
 454         assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()),
               "next bitmap must be clear");
 456       }
 457       region = _regions.next();
 458     }
 459   }
 460 };
 461 
 462 void ShenandoahHeap::reset_next_mark_bitmap_traversal() {
 463   assert_gc_workers(_workers->active_workers());
 464 
 465   ShenandoahResetNextBitmapTraversalTask task;
 466   _workers->run_task(&task);
 467 }
 468 
 469 bool ShenandoahHeap::is_next_bitmap_clear() {
 470   for (size_t idx = 0; idx < _num_regions; idx++) {
 471     ShenandoahHeapRegion* r = get_region(idx);
 472     if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
 473       return false;
 474     }
 475   }
 476   return true;
 477 }
 478 
 479 bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 480   return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 481 }
 482 
 483 bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 484   return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 485 }
 486 
 487 void ShenandoahHeap::print_on(outputStream* st) const {
 488   st->print_cr("Shenandoah Heap");
 489   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 490                capacity() / K, committed() / K, used() / K);
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "K regions",
 492                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 493 
 494   st->print("Status: ");
 495   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 496   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 497   if (is_evacuation_in_progress())           st->print("evacuating, ");
 498   if (is_update_refs_in_progress())          st->print("updating refs, ");
 499   if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
 500   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 501   if (is_full_gc_in_progress())              st->print("full gc, ");
 502   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 503 
 504   if (cancelled_concgc()) {
 505     st->print("conc gc cancelled");
 506   } else {
 507     st->print("not cancelled");
 508   }
 509   st->cr();
 510 
 511   st->print_cr("Reserved region:");
 512   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 513                p2i(reserved_region().start()),
 514                p2i(reserved_region().end()));
 515 
 516   if (UseShenandoahMatrix) {
 517     st->print_cr("Matrix:");
 518 
 519     ShenandoahConnectionMatrix* matrix = connection_matrix();
 520     if (matrix != NULL) {
 521       st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
 522       st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
 523       st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
 524     } else {
 525       st->print_cr(" No matrix.");
 526     }
 527   }
 528 
 529   if (Verbose) {
 530     print_heap_regions_on(st);
 531   }
 532 }
 533 
 534 class ShenandoahInitGCLABClosure : public ThreadClosure {
 535 public:
 536   void do_thread(Thread* thread) {
 537     if (thread != NULL && (thread->is_Java_thread() || thread->is_Worker_thread() ||
 538                            thread->is_ConcurrentGC_thread())) {
 539       thread->gclab().initialize(true);
 540     }
 541   }
 542 };
 543 
 544 void ShenandoahHeap::post_initialize() {
 545   CollectedHeap::post_initialize();
 546   if (UseTLAB) {
 547     MutexLocker ml(Threads_lock);
 548 
 549     ShenandoahInitGCLABClosure init_gclabs;
 550     Threads::threads_do(&init_gclabs);
 551     gc_threads_do(&init_gclabs);
 552 
    // GCLABs cannot be initialized early during VM startup, because their max_size cannot be
    // determined yet. From here on, the WorkGang initializes the gclab whenever a new worker is created.
 555     _workers->set_initialize_gclab();
 556   }
 557 
 558   _scm->initialize(_max_workers);
 559   _full_gc->initialize(_gc_timer);
 560 
 561   ref_processing_init();
 562 
 563   _shenandoah_policy->post_heap_initialize();
 564 }
 565 
 566 size_t ShenandoahHeap::used() const {
 567   return OrderAccess::load_acquire(&_used);
 568 }
 569 
 570 size_t ShenandoahHeap::committed() const {
 571   OrderAccess::acquire();
 572   return _committed;
 573 }
 574 
 575 void ShenandoahHeap::increase_committed(size_t bytes) {
 576   assert_heaplock_or_safepoint();
 577   _committed += bytes;
 578 }
 579 
 580 void ShenandoahHeap::decrease_committed(size_t bytes) {
 581   assert_heaplock_or_safepoint();
 582   _committed -= bytes;
 583 }
 584 
 585 void ShenandoahHeap::increase_used(size_t bytes) {
 586   Atomic::add(bytes, &_used);
 587 }
 588 
 589 void ShenandoahHeap::set_used(size_t bytes) {
 590   OrderAccess::release_store_fence(&_used, bytes);
 591 }
 592 
 593 void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease used() by more than what is currently used");
  Atomic::sub(bytes, &_used);
 596 }
 597 
 598 void ShenandoahHeap::increase_allocated(size_t bytes) {
 599   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 600 }
 601 
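// Account an allocation of "words" words: wasted space still counts toward the allocation rate
// (and is force-claimed from the pacer budget), but only non-waste allocations increase used().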
 602 void ShenandoahHeap::notify_alloc(size_t words, bool waste) {
 603   size_t bytes = words * HeapWordSize;
 604   if (!waste) {
 605     increase_used(bytes);
 606   }
 607   increase_allocated(bytes);
 608   if (ShenandoahPacing) {
 609     control_thread()->pacing_notify_alloc(words);
 610     if (waste) {
 611       pacer()->claim_for_alloc(words, true);
 612     }
 613   }
 614 }
 615 
 616 size_t ShenandoahHeap::capacity() const {
 617   return num_regions() * ShenandoahHeapRegion::region_size_bytes();
 618 }
 619 
 620 bool ShenandoahHeap::is_maximal_no_gc() const {
 621   Unimplemented();
 622   return true;
 623 }
 624 
 625 size_t ShenandoahHeap::max_capacity() const {
 626   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 627 }
 628 
 629 size_t ShenandoahHeap::initial_capacity() const {
 630   return _initial_size;
 631 }
 632 
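// Answers whether the address lies within the reserved heap range (all regions), regardless of
// whether the containing region is currently committed.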
 633 bool ShenandoahHeap::is_in(const void* p) const {
 634   HeapWord* heap_base = (HeapWord*) base();
 635   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 636   return p >= heap_base && p < last_region_end;
 637 }
 638 
 639 bool ShenandoahHeap::is_scavengable(oop p) {
 640   return true;
 641 }
 642 
 643 void ShenandoahHeap::handle_heap_shrinkage(double shrink_before) {
 644   if (!ShenandoahUncommit) {
 645     return;
 646   }
 647 
 648   ShenandoahHeapLocker locker(lock());
 649 
 650   size_t count = 0;
 651   for (size_t i = 0; i < num_regions(); i++) {
 652     ShenandoahHeapRegion* r = get_region(i);
 653     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 654       r->make_uncommitted();
 655       count++;
 656     }
 657   }
 658 
 659   if (count > 0) {
 660     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 661                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 662     control_thread()->notify_heap_changed();
 663   }
 664 }
 665 
 666 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain the gclab and allocate the object in the shared space if
  // the amount of free space left in the gclab is too large to discard.
 669   if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
 670     thread->gclab().record_slow_allocation(size);
 671     return NULL;
 672   }
 673 
 674   // Discard gclab and allocate a new one.
 675   // To minimize fragmentation, the last GCLAB may be smaller than the rest.
 676   size_t new_gclab_size = thread->gclab().compute_size(size);
 677 
 678   thread->gclab().clear_before_allocation();
 679 
 680   if (new_gclab_size == 0) {
 681     return NULL;
 682   }
 683 
 684   // Allocate a new GCLAB...
 685   HeapWord* obj = allocate_new_gclab(new_gclab_size);
 686   if (obj == NULL) {
 687     return NULL;
 688   }
 689 
 690   if (ZeroTLAB) {
 691     // ..and clear it.
 692     Copy::zero_to_words(obj, new_gclab_size);
 693   } else {
 694     // ...and zap just allocated object.
 695 #ifdef ASSERT
 696     // Skip mangling the space corresponding to the object header to
 697     // ensure that the returned space is not considered parsable by
 698     // any concurrent GC thread.
 699     size_t hdr_size = oopDesc::header_size();
 700     Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
 701 #endif // ASSERT
 702   }
 703   thread->gclab().fill(obj, obj + size, new_gclab_size);
 704   return obj;
 705 }
 706 
 707 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
 708 #ifdef ASSERT
 709   log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 710 #endif
 711   return allocate_new_lab(word_size, _alloc_tlab);
 712 }
 713 
 714 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
 715 #ifdef ASSERT
 716   log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 717 #endif
 718   return allocate_new_lab(word_size, _alloc_gclab);
 719 }
 720 
 721 HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
 722   HeapWord* result = allocate_memory(word_size, type);
 723 
 724   if (result != NULL) {
 725     assert(! in_collection_set(result), "Never allocate in collection set");
 726 
    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
 728 
 729   }
 730   return result;
 731 }
 732 
 733 ShenandoahHeap* ShenandoahHeap::heap() {
 734   CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
 736   assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
 737   return (ShenandoahHeap*) heap;
 738 }
 739 
 740 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
 741   CollectedHeap* heap = Universe::heap();
 742   return (ShenandoahHeap*) heap;
 743 }
 744 
 745 HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, AllocType type) {
 746   ShenandoahAllocTrace trace_alloc(word_size, type);
 747 
 748   bool in_new_region = false;
 749   HeapWord* result = NULL;
 750 
 751   if (type == _alloc_tlab || type == _alloc_shared) {
 752     if (ShenandoahPacing) {
 753       pacer()->pace_for_alloc(word_size);
 754     }
 755 
 756     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 757       result = allocate_memory_under_lock(word_size, type, in_new_region);
 758     }
 759 
    // If allocation failed, trigger a GC, then retry the allocation.
    //
    // It might happen that one of the threads requesting allocation unblocks
    // long after the full GC happened, only to fail the second allocation because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try the full GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying a bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
 770     int tries = 0;
 771     while ((result == NULL) && (tries++ < ShenandoahAllocGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing GC, try %d",
 773                     p2i(Thread::current()), word_size * HeapWordSize, tries);
 774       control_thread()->handle_alloc_failure(word_size);
 775       result = allocate_memory_under_lock(word_size, type, in_new_region);
 776     }
 777   } else {
 778     assert(type == _alloc_gclab || type == _alloc_shared_gc, "Can only accept these types here");
 779     result = allocate_memory_under_lock(word_size, type, in_new_region);
 780     // Do not call handle_alloc_failure() here, because we cannot block.
 781     // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
 782   }
 783 
 784   if (in_new_region) {
 785     control_thread()->notify_heap_changed();
 786   }
 787 
  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d",
 789                                word_size, p2i(result), Thread::current()->osthread()->thread_id());
 790 
 791   if (result != NULL) {
 792     notify_alloc(word_size, false);
 793   }
 794 
 795   return result;
 796 }
 797 
 798 HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, AllocType type, bool& in_new_region) {
 799   ShenandoahHeapLocker locker(lock());
 800   return _free_set->allocate(word_size, type, in_new_region);
 801 }
 802 
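// Every Shenandoah object is preceded by a one-word Brooks forwarding pointer, so shared
// allocations reserve an extra BrooksPointer::word_size() and hand out the address just past it.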
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _alloc_shared);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    return NULL;
  }
}
 816 
 817 class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
 818 private:
 819   ShenandoahHeap* _heap;
 820   Thread* _thread;
 821 public:
 822   ShenandoahEvacuateUpdateRootsClosure() :
 823     _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 824   }
 825 
 826 private:
 827   template <class T>
 828   void do_oop_work(T* p) {
 829     assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
 830 
 831     T o = RawAccess<>::oop_load(p);
 832     if (! CompressedOops::is_null(o)) {
 833       oop obj = CompressedOops::decode_not_null(o);
 834       if (_heap->in_collection_set(obj)) {
 835         shenandoah_assert_marked_complete(p, obj);
 836         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 837         if (oopDesc::unsafe_equals(resolved, obj)) {
 838           resolved = _heap->evacuate_object(obj, _thread);
 839         }
 840         RawAccess<OOP_NOT_NULL>::oop_store(p, resolved);
 841       }
 842     }
 843   }
 844 
 845 public:
 846   void do_oop(oop* p) {
 847     do_oop_work(p);
 848   }
 849   void do_oop(narrowOop* p) {
 850     do_oop_work(p);
 851   }
 852 };
 853 
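// Unlike ShenandoahEvacuateUpdateRootsClosure above, this closure only evacuates the object and
// deliberately leaves the root slot untouched. It is used for concurrent code cache evacuation,
// where updating nmethod oops in place would risk non-atomic stores (see ShenandoahParallelEvacuationTask).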
 854 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
 855 private:
 856   ShenandoahHeap* _heap;
 857   Thread* _thread;
 858 public:
 859   ShenandoahEvacuateRootsClosure() :
 860           _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 861   }
 862 
 863 private:
 864   template <class T>
 865   void do_oop_work(T* p) {
 866     T o = RawAccess<>::oop_load(p);
 867     if (! CompressedOops::is_null(o)) {
 868       oop obj = CompressedOops::decode_not_null(o);
 869       if (_heap->in_collection_set(obj)) {
 870         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 871         if (oopDesc::unsafe_equals(resolved, obj)) {
 872           _heap->evacuate_object(obj, _thread);
 873         }
 874       }
 875     }
 876   }
 877 
 878 public:
 879   void do_oop(oop* p) {
 880     do_oop_work(p);
 881   }
 882   void do_oop(narrowOop* p) {
 883     do_oop_work(p);
 884   }
 885 };
 886 
 887 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
 888 private:
 889   ShenandoahHeap* const _heap;
 890   Thread* const _thread;
 891 public:
 892   ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 893     _heap(heap), _thread(Thread::current()) {}
 894 
 895   void do_object(oop p) {
 896     shenandoah_assert_marked_complete(NULL, p);
 897     if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
 898       _heap->evacuate_object(p, _thread);
 899     }
 900   }
 901 };
 902 
 903 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
 904 private:
 905   ShenandoahHeap* const _sh;
 906   ShenandoahCollectionSet* const _cs;
 907   ShenandoahSharedFlag _claimed_codecache;
 908 
 909 public:
 910   ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
 911                          ShenandoahCollectionSet* cs) :
 912     AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs)
 915   {}
 916 
 917   void work(uint worker_id) {
 918 
 919     ShenandoahEvacOOMScope oom_evac_scope;
 920     SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 921 
 922     // If concurrent code cache evac is enabled, evacuate it here.
 923     // Note we cannot update the roots here, because we risk non-atomic stores to the alive
 924     // nmethods. The update would be handled elsewhere.
 925     if (ShenandoahConcurrentEvacCodeRoots && _claimed_codecache.try_set()) {
 926       ShenandoahEvacuateRootsClosure cl;
 927       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 928       CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
 929       CodeCache::blobs_do(&blobs);
 930     }
 931 
 932     ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
 933     ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      log_develop_trace(gc, region)("Thread " INT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
 936                                     worker_id,
 937                                     r->region_number());
 938 
 939       assert(r->has_live(), "all-garbage regions are reclaimed early");
 940       _sh->marked_object_iterate(r, &cl);
 941 
 942       if (_sh->check_cancelled_concgc_and_yield()) {
 943         log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, r->region_number());
 944         break;
 945       }
 946 
 947       if (ShenandoahPacing) {
 948         _sh->pacer()->report_evac(r->get_live_data_words());
 949       }
 950     }
 951   }
 952 };
 953 
 954 void ShenandoahHeap::trash_cset_regions() {
 955   ShenandoahHeapLocker locker(lock());
 956 
 957   ShenandoahCollectionSet* set = collection_set();
 958   ShenandoahHeapRegion* r;
 959   set->clear_current_index();
 960   while ((r = set->next()) != NULL) {
 961     r->make_trash();
 962   }
 963   collection_set()->clear();
 964 }
 965 
 966 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
 967   st->print_cr("Heap Regions:");
 968   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
 969   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
 970   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
 971   st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
 972 
 973   for (size_t i = 0; i < num_regions(); i++) {
 974     get_region(i)->print_on(st);
 975   }
 976 }
 977 
 978 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
 979   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
 980 
 981   oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
 982   size_t size = humongous_obj->size() + BrooksPointer::word_size();
 983   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
 984   size_t index = start->region_number() + required_regions - 1;
 985 
 986   assert(!start->has_live(), "liveness must be zero");
  log_trace(gc, humongous)("Reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
 988 
  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, the assertion fails when printing a region to the trace log,
    // because it expects every humongous continuation to still be preceded by a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);
 993 
 994     LogTarget(Trace, gc, humongous) lt;
 995     if (lt.is_enabled()) {
 996       ResourceMark rm;
 997       LogStream ls(lt);
 998       region->print_on(&ls);
 999     }
1000 
1001     assert(region->is_humongous(), "expect correct humongous start or continuation");
1002     assert(!in_collection_set(region), "Humongous region should not be in collection set");
1003 
1004     region->make_trash();
1005   }
1006 }
1007 
1008 #ifdef ASSERT
1009 class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
1010   bool heap_region_do(ShenandoahHeapRegion* r) {
1011     assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
1012     return false;
1013   }
1014 };
1015 #endif
1016 
1017 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
1018   log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());
1019 
1020   if (!cancelled_concgc()) {
1021     // Allocations might have happened before we STWed here, record peak:
1022     shenandoahPolicy()->record_peak_occupancy();
1023 
1024     make_tlabs_parsable(true);
1025 
1026     if (ShenandoahVerify) {
1027       verifier()->verify_after_concmark();
1028     }
1029 
1030     trash_cset_regions();
1031 
1032     // NOTE: This needs to be done during a stop the world pause, because
1033     // putting regions into the collection set concurrently with Java threads
1034     // will create a race. In particular, acmp could fail because when we
1035     // resolve the first operand, the containing region might not yet be in
1036     // the collection set, and thus return the original oop. When the 2nd
1037     // operand gets resolved, the region could be in the collection set
1038     // and the oop gets evacuated. If both operands have originally been
1039     // the same, we get false negatives.
1040 
1041     {
1042       ShenandoahHeapLocker locker(lock());
1043       _collection_set->clear();
1044       _free_set->clear();
1045 
1046 #ifdef ASSERT
1047       ShenandoahCheckCollectionSetClosure ccsc;
1048       heap_region_iterate(&ccsc);
1049 #endif
1050 
1051       _shenandoah_policy->choose_collection_set(_collection_set);
1052 
1053       _free_set->rebuild();
1054     }
1055 
1056     Universe::update_heap_info_at_gc();
1057 
1058     if (ShenandoahVerify) {
1059       verifier()->verify_before_evacuation();
1060     }
1061   }
1062 }
1063 
1064 
1065 class ShenandoahRetireTLABClosure : public ThreadClosure {
1066 private:
1067   bool _retire;
1068 
1069 public:
1070   ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}
1071 
1072   void do_thread(Thread* thread) {
1073     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1074     thread->gclab().make_parsable(_retire);
1075   }
1076 };
1077 
1078 void ShenandoahHeap::make_tlabs_parsable(bool retire_tlabs) {
1079   if (UseTLAB) {
1080     CollectedHeap::ensure_parsability(retire_tlabs);
1081     ShenandoahRetireTLABClosure cl(retire_tlabs);
1082     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1083       cl.do_thread(t);
1084     }
1085     gc_threads_do(&cl);
1086   }
1087 }
1088 
1089 
1090 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1091   ShenandoahRootEvacuator* _rp;
1092 public:
1093 
1094   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1095     AbstractGangTask("Shenandoah evacuate and update roots"),
1096     _rp(rp)
1097   {
1098     // Nothing else to do.
1099   }
1100 
1101   void work(uint worker_id) {
1102     ShenandoahEvacOOMScope oom_evac_scope;
1103     ShenandoahEvacuateUpdateRootsClosure cl;
1104 
1105     if (ShenandoahConcurrentEvacCodeRoots) {
1106       _rp->process_evacuate_roots(&cl, NULL, worker_id);
1107     } else {
1108       MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1109       _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1110     }
1111   }
1112 };
1113 
1114 class ShenandoahFixRootsTask : public AbstractGangTask {
1115   ShenandoahRootEvacuator* _rp;
1116 public:
1117 
1118   ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
1119     AbstractGangTask("Shenandoah update roots"),
1120     _rp(rp)
1121   {
1122     // Nothing else to do.
1123   }
1124 
1125   void work(uint worker_id) {
1126     ShenandoahEvacOOMScope oom_evac_scope;
1127     ShenandoahUpdateRefsClosure cl;
1128     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1129 
1130     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1131   }
1132 };
1133 
1134 void ShenandoahHeap::evacuate_and_update_roots() {
1135 
1136 #if defined(COMPILER2) || INCLUDE_JVMCI
1137   DerivedPointerTable::clear();
1138 #endif
1139   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1140 
1141   {
1142     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1143     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1144     workers()->run_task(&roots_task);
1145   }
1146 
1147 #if defined(COMPILER2) || INCLUDE_JVMCI
1148   DerivedPointerTable::update_pointers();
1149 #endif
1150   if (cancelled_concgc()) {
1151     fixup_roots();
1152   }
1153 }
1154 
1155 void ShenandoahHeap::fixup_roots() {
1156     assert(cancelled_concgc(), "Only after concurrent cycle failed");
1157 
    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, so its evacuation fails and it leaves a from-space
    // pointer to object X in a root; GC thread 2 then evacuates the same object X to to-space,
    // which leaves a truly dangling from-space reference in the first root oop*. This must not happen.
    // clear() and update_pointers() must always be called in pairs, and
    // cannot nest with the clear()/update_pointers() calls above.
1165 #if defined(COMPILER2) || INCLUDE_JVMCI
1166     DerivedPointerTable::clear();
1167 #endif
1168     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1169     ShenandoahFixRootsTask update_roots_task(&rp);
1170     workers()->run_task(&update_roots_task);
1171 #if defined(COMPILER2) || INCLUDE_JVMCI
1172     DerivedPointerTable::update_pointers();
1173 #endif
1174 }
1175 
1176 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1177   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1178 
1179   CodeBlobToOopClosure blobsCl(cl, false);
1180   CLDToOopClosure cldCl(cl);
1181 
1182   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1183   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
1184 }
1185 
1186 bool ShenandoahHeap::supports_tlab_allocation() const {
1187   return true;
1188 }
1189 
1190 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1191   return MIN2(_free_set->unsafe_peek_free(), max_tlab_size());
1192 }
1193 
1194 size_t ShenandoahHeap::max_tlab_size() const {
1195   return ShenandoahHeapRegion::max_tlab_size_bytes();
1196 }
1197 
1198 class ShenandoahResizeGCLABClosure : public ThreadClosure {
1199 public:
1200   void do_thread(Thread* thread) {
1201     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1202     thread->gclab().resize();
1203   }
1204 };
1205 
1206 void ShenandoahHeap::resize_all_tlabs() {
1207   CollectedHeap::resize_all_tlabs();
1208 
1209   ShenandoahResizeGCLABClosure cl;
1210   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1211     cl.do_thread(t);
1212   }
1213   gc_threads_do(&cl);
1214 }
1215 
1216 class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
1217 public:
1218   void do_thread(Thread* thread) {
1219     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1220     thread->gclab().accumulate_statistics();
1221     thread->gclab().initialize_statistics();
1222   }
1223 };
1224 
1225 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1226   ShenandoahAccumulateStatisticsGCLABClosure cl;
1227   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1228     cl.do_thread(t);
1229   }
1230   gc_threads_do(&cl);
1231 }
1232 
1233 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1234   return true;
1235 }
1236 
1237 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1238   // Overridden to do nothing.
1239   return new_obj;
1240 }
1241 
1242 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1243   return true;
1244 }
1245 
1246 bool ShenandoahHeap::card_mark_must_follow_store() const {
1247   return false;
1248 }
1249 
1250 void ShenandoahHeap::collect(GCCause::Cause cause) {
1251   control_thread()->handle_explicit_gc(cause);
1252 }
1253 
1254 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1255   //assert(false, "Shouldn't need to do full collections");
1256 }
1257 
1258 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1259   Unimplemented();
1260   return NULL;
1261 
1262 }
1263 
1264 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1265   return _shenandoah_policy;
1266 }
1267 
1268 
1269 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1270   Space* sp = heap_region_containing(addr);
1271   if (sp != NULL) {
1272     return sp->block_start(addr);
1273   }
1274   return NULL;
1275 }
1276 
1277 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1278   Space* sp = heap_region_containing(addr);
1279   assert(sp != NULL, "block_size of address outside of heap");
1280   return sp->block_size(addr);
1281 }
1282 
1283 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1284   Space* sp = heap_region_containing(addr);
1285   return sp->block_is_obj(addr);
1286 }
1287 
1288 jlong ShenandoahHeap::millis_since_last_gc() {
1289   return 0;
1290 }
1291 
1292 void ShenandoahHeap::prepare_for_verify() {
1293   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1294     make_tlabs_parsable(false);
1295   }
1296 }
1297 
1298 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1299   workers()->print_worker_threads_on(st);
1300   if (ShenandoahStringDedup::is_enabled()) {
1301     ShenandoahStringDedup::print_worker_threads_on(st);
1302   }
1303 }
1304 
1305 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1306   workers()->threads_do(tcl);
1307   if (ShenandoahStringDedup::is_enabled()) {
1308     ShenandoahStringDedup::threads_do(tcl);
1309   }
1310 }
1311 
1312 void ShenandoahHeap::print_tracing_info() const {
1313   LogTarget(Info, gc, stats) lt;
1314   if (lt.is_enabled()) {
1315     ResourceMark rm;
1316     LogStream ls(lt);
1317 
1318     phase_timings()->print_on(&ls);
1319 
1320     ls.cr();
1321     ls.cr();
1322 
1323     shenandoahPolicy()->print_gc_stats(&ls);
1324 
1325     ls.cr();
1326     ls.cr();
1327 
1328     if (ShenandoahPacing) {
1329       pacer()->print_on(&ls);
1330     }
1331 
1332     ls.cr();
1333     ls.cr();
1334 
1335     if (ShenandoahAllocationTrace) {
1336       assert(alloc_tracker() != NULL, "Must be");
1337       alloc_tracker()->print_on(&ls);
1338     } else {
1339       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1340     }
1341   }
1342 }
1343 
1344 void ShenandoahHeap::verify(VerifyOption vo) {
1345   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1346     if (ShenandoahVerify) {
1347       verifier()->verify_generic(vo);
1348     } else {
1349       // TODO: Consider allocating verification bitmaps on demand,
1350       // and turn this on unconditionally.
1351     }
1352   }
1353 }
1354 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1355   return _free_set->capacity();
1356 }
1357 
1358 class ObjectIterateScanRootClosure : public ExtendedOopClosure {
1359 private:
1360   MarkBitMap* _bitmap;
1361   Stack<oop,mtGC>* _oop_stack;
1362 
1363   template <class T>
1364   void do_oop_work(T* p) {
1365     T o = RawAccess<>::oop_load(p);
1366     if (!CompressedOops::is_null(o)) {
1367       oop obj = CompressedOops::decode_not_null(o);
1368       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1369       assert(oopDesc::is_oop(obj), "must be a valid oop");
1370       if (!_bitmap->isMarked((HeapWord*) obj)) {
1371         _bitmap->mark((HeapWord*) obj);
1372         _oop_stack->push(obj);
1373       }
1374     }
1375   }
1376 public:
1377   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1378     _bitmap(bitmap), _oop_stack(oop_stack) {}
1379   void do_oop(oop* p)       { do_oop_work(p); }
1380   void do_oop(narrowOop* p) { do_oop_work(p); }
1381 };
1382 
1383 /*
1384  * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see the comment below), we don't
 * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
 * control, we call SH::make_tlabs_parsable().
1388  */
1389 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1390   // No-op.
1391 }
1392 
1393 /*
1394  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1395  *
1396  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1397  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1398  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1399  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1400  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1401  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1402  * wiped the bitmap in preparation for next marking).
1403  *
1404  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1405  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1406  * is allowed to report dead objects, but is not required to do so.
1407  */
1408 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1409   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1410   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1411     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1412     return;
1413   }
1414 
1415   Stack<oop,mtGC> oop_stack;
1416 
1417   // First, we process all GC roots. This populates the work stack with initial objects.
1418   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1419   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1420   CLDToOopClosure clds(&oops, false);
1421   CodeBlobToOopClosure blobs(&oops, false);
1422   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1423 
1424   // Work through the oop stack to traverse heap.
1425   while (! oop_stack.is_empty()) {
1426     oop obj = oop_stack.pop();
1427     assert(oopDesc::is_oop(obj), "must be a valid oop");
1428     cl->do_object(obj);
1429     obj->oop_iterate(&oops);
1430   }
1431 
1432   assert(oop_stack.is_empty(), "should be empty");
1433 
1434   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1435     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1436   }
1437 }
1438 
1439 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1440   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1441   object_iterate(cl);
1442 }
1443 
1444 // Apply blk->heap_region_do() on all committed regions in address order,
1445 // terminating the iteration early if heap_region_do() returns true.
1446 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1447   for (size_t i = 0; i < num_regions(); i++) {
1448     ShenandoahHeapRegion* current  = get_region(i);
1449     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1450       continue;
1451     }
1452     if (skip_cset_regions && in_collection_set(current)) {
1453       continue;
1454     }
1455     if (blk->heap_region_do(current)) {
1456       return;
1457     }
1458   }
1459 }
1460 
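// Clears per-region liveness data and records the current top as the next
// top-at-mark-start (TAMS), in preparation for the upcoming marking cycle.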
1461 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1462 private:
1463   ShenandoahHeap* sh;
1464 public:
1465   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1466 
1467   bool heap_region_do(ShenandoahHeapRegion* r) {
1468     r->clear_live_data();
1469     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1470     return false;
1471   }
1472 };
1473 
1474 void ShenandoahHeap::op_init_mark() {
1475   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1476 
1477   assert(is_next_bitmap_clear(), "need clear marking bitmap");
1478 
1479   if (ShenandoahVerify) {
1480     verifier()->verify_before_concmark();
1481   }
1482 
1483   {
1484     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1485     accumulate_statistics_all_tlabs();
1486   }
1487 
1488   set_concurrent_mark_in_progress(true);
1489   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1490   if (UseTLAB) {
1491     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1492     make_tlabs_parsable(true);
1493   }
1494 
1495   {
1496     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1497     ShenandoahClearLivenessClosure clc(this);
1498     heap_region_iterate(&clc);
1499   }
1500 
1501   // Make above changes visible to worker threads
1502   OrderAccess::fence();
1503 
1504   concurrentMark()->init_mark_roots();
1505 
1506   if (UseTLAB) {
1507     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1508     resize_all_tlabs();
1509   }
1510 
1511   if (ShenandoahPacing) {
1512     pacer()->setup_for_mark();
1513   }
1514 }
1515 
1516 void ShenandoahHeap::op_mark() {
1517   concurrentMark()->mark_from_roots();
1518 
1519   // Allocations happen during concurrent mark, record peak after the phase:
1520   shenandoahPolicy()->record_peak_occupancy();
1521 }
1522 
1523 void ShenandoahHeap::op_final_mark() {
1524   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1525 
  // It is critical that we evacuate roots right after finishing marking,
  // so that we don't get unmarked objects in the roots.
1529 
1530   if (! cancelled_concgc()) {
1531     concurrentMark()->finish_mark_from_roots();
1532     stop_concurrent_marking();
1533 
1534     {
1535       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1536       prepare_for_concurrent_evacuation();
1537     }
1538 
1539     // If collection set has candidates, start evacuation.
1540     // Otherwise, bypass the rest of the cycle.
1541     if (!collection_set()->is_empty()) {
1542       set_evacuation_in_progress(true);
1543       // From here on, we need to update references.
1544       set_has_forwarded_objects(true);
1545 
1546       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1547       evacuate_and_update_roots();
1548     }
1549 
1550     if (ShenandoahPacing) {
1551       pacer()->setup_for_evac();
1552     }
1553   } else {
1554     concurrentMark()->cancel();
1555     stop_concurrent_marking();
1556 
1557     if (process_references()) {
1558       // Abandon reference processing right away: pre-cleaning must have failed.
1559       ReferenceProcessor *rp = ref_processor();
1560       rp->disable_discovery();
1561       rp->abandon_partial_discovery();
1562       rp->verify_no_references_recorded();
1563     }
1564   }
1565 }
1566 
1567 void ShenandoahHeap::op_final_evac() {
1568   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1569 
1570   set_evacuation_in_progress(false);
1571   if (ShenandoahVerify) {
1572     verifier()->verify_after_evacuation();
1573   }
1574 }
1575 
void ShenandoahHeap::op_evac() {
  LogTarget(Trace, gc, region) lt_region;
1579   LogTarget(Trace, gc, cset) lt_cset;
1580 
1581   if (lt_region.is_enabled()) {
1582     ResourceMark rm;
1583     LogStream ls(lt_region);
1584     ls.print_cr("All available regions:");
1585     print_heap_regions_on(&ls);
1586   }
1587 
1588   if (lt_cset.is_enabled()) {
1589     ResourceMark rm;
1590     LogStream ls(lt_cset);
1591     ls.print_cr("Collection set ("SIZE_FORMAT" regions):", _collection_set->count());
1592     _collection_set->print_on(&ls);
1593 
1594     ls.print_cr("Free set:");
1595     _free_set->print_on(&ls);
1596   }
1597 
1598   ShenandoahParallelEvacuationTask task(this, _collection_set);
1599   workers()->run_task(&task);
1600 
1601   if (lt_cset.is_enabled()) {
1602     ResourceMark rm;
1603     LogStream ls(lt_cset);
1604     ls.print_cr("After evacuation collection set ("SIZE_FORMAT" regions):",
1605                 _collection_set->count());
1606     _collection_set->print_on(&ls);
1607 
1608     ls.print_cr("After evacuation free set:");
1609     _free_set->print_on(&ls);
1610   }
1611 
1612   if (lt_region.is_enabled()) {
1613     ResourceMark rm;
1614     LogStream ls(lt_region);
1615     ls.print_cr("All regions after evacuation:");
1616     print_heap_regions_on(&ls);
1617   }
1618 
1619   // Allocations happen during evacuation, record peak after the phase:
1620   shenandoahPolicy()->record_peak_occupancy();
1621 }
1622 
1623 void ShenandoahHeap::op_updaterefs() {
1624   update_heap_references(true);
1625 
1626   // Allocations happen during update-refs, record peak after the phase:
1627   shenandoahPolicy()->record_peak_occupancy();
1628 }
1629 
1630 void ShenandoahHeap::op_cleanup() {
1631   ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
1632   free_set()->recycle_trash();
1633 
1634   // Allocations happen during cleanup, record peak after the phase:
1635   shenandoahPolicy()->record_peak_occupancy();
1636 }
1637 
1638 void ShenandoahHeap::op_cleanup_bitmaps() {
1639   op_cleanup();
1640 
1641   ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1642   reset_next_mark_bitmap();
1643 
1644   // Allocations happen during bitmap cleanup, record peak after the phase:
1645   shenandoahPolicy()->record_peak_occupancy();
1646 }
1647 
1648 void ShenandoahHeap::op_cleanup_traversal() {
1649 
1650   {
1651     ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1652     reset_next_mark_bitmap_traversal();
1653   }
1654 
1655   op_cleanup();
1656 
1657   // Allocations happen during bitmap cleanup, record peak after the phase:
1658   shenandoahPolicy()->record_peak_occupancy();
1659 }
1660 
1661 void ShenandoahHeap::op_preclean() {
1662   concurrentMark()->preclean_weak_refs();
1663 
1664   // Allocations happen during concurrent preclean, record peak after the phase:
1665   shenandoahPolicy()->record_peak_occupancy();
1666 }
1667 
1668 void ShenandoahHeap::op_init_traversal() {
1669   traversal_gc()->init_traversal_collection();
1670 }
1671 
1672 void ShenandoahHeap::op_traversal() {
1673   traversal_gc()->concurrent_traversal_collection();
1674 }
1675 
1676 void ShenandoahHeap::op_final_traversal() {
1677   traversal_gc()->final_traversal_collection();
1678 }
1679 
1680 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1681   full_gc()->do_it(cause);
1682 }
1683 
1684 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate GC failure
  // via the cancelled_concgc() flag. So, if we detect the failure after some phase, we have to
  // upgrade the Degenerated GC to a Full GC.
1688 
1689   clear_cancelled_concgc();
1690 
1691   size_t used_before = used();
1692 
1693   switch (point) {
1694     case _degenerated_evac:
1695       // Not possible to degenerate from here, upgrade to Full GC right away.
1696       cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1697       op_degenerated_fail();
1698       return;
1699 
    // The cases below form a Duff's-device-like structure: they describe the actual GC cycle,
    // but enter it at different points, depending on which concurrent phase degenerated.
1703 
1704     case _degenerated_traversal:
1705       {
1706         ShenandoahHeapLocker locker(lock());
1707         collection_set()->clear_current_index();
1708         for (size_t i = 0; i < collection_set()->count(); i++) {
1709           ShenandoahHeapRegion* r = collection_set()->next();
1710           r->make_regular_bypass();
1711         }
1712         collection_set()->clear();
1713       }
1714       op_final_traversal();
1715       op_cleanup_traversal();
1716       return;
1717 
1718     case _degenerated_outside_cycle:
1719       if (shenandoahPolicy()->can_do_traversal_gc()) {
        // With traversal GC enabled, we cannot degenerate into the regular mark/evac/update
        // cycle below; upgrade to Full GC right away.
1721         cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1722         op_degenerated_fail();
1723         return;
1724       }
1725       op_init_mark();
1726       if (cancelled_concgc()) {
1727         op_degenerated_fail();
1728         return;
1729       }
1730 
1731     case _degenerated_mark:
1732       op_final_mark();
1733       if (cancelled_concgc()) {
1734         op_degenerated_fail();
1735         return;
1736       }
1737 
1738       op_cleanup();
1739 
      // If the heuristics think we should do the cycle, this flag is set,
      // and we can do evacuation. Otherwise, this is the shortcut cycle.
1742       if (is_evacuation_in_progress()) {
1743         op_evac();
1744         if (cancelled_concgc()) {
1745           op_degenerated_fail();
1746           return;
1747         }
1748       }
1749 
      // If the heuristics think we should do the cycle, this flag is set,
      // and we need to do update-refs. Otherwise, this is the shortcut cycle.
1752       if (has_forwarded_objects()) {
1753         op_init_updaterefs();
1754         if (cancelled_concgc()) {
1755           op_degenerated_fail();
1756           return;
1757         }
1758       }
1759 
1760     case _degenerated_updaterefs:
1761       if (has_forwarded_objects()) {
1762         op_final_updaterefs();
1763         if (cancelled_concgc()) {
1764           op_degenerated_fail();
1765           return;
1766         }
1767       }
1768 
1769       op_cleanup_bitmaps();
1770       break;
1771 
1772     default:
1773       ShouldNotReachHere();
1774   }
1775 
1776   if (ShenandoahVerify) {
1777     verifier()->verify_after_degenerated();
1778   }
1779 
1780   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1781   // because that probably means the heap is overloaded and/or fragmented.
1782   size_t used_after = used();
1783   size_t difference = (used_before > used_after) ? used_before - used_after : 0;
1784   if (difference < ShenandoahHeapRegion::region_size_words()) {
1785     cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1786     op_degenerated_futile();
1787   }
1788 }
1789 
1790 void ShenandoahHeap::op_degenerated_fail() {
1791   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1792   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1793   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1794 }
1795 
1796 void ShenandoahHeap::op_degenerated_futile() {
1797   log_info(gc)("Degenerated GC had not reclaimed enough, upgrading to Full GC");
1798   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1799   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1800 }
1801 
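// Promotes the "next" marking data to "complete": concurrent marking writes the "next"
// bitmap and TAMS values, and after a successful mark the rest of the cycle reads them
// as the "complete" versions.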
1802 void ShenandoahHeap::swap_mark_bitmaps() {
1803   // Swap bitmaps.
1804   MarkBitMap* tmp1 = _complete_mark_bit_map;
1805   _complete_mark_bit_map = _next_mark_bit_map;
1806   _next_mark_bit_map = tmp1;
1807 
1808   // Swap top-at-mark-start pointers
1809   HeapWord** tmp2 = _complete_top_at_mark_starts;
1810   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1811   _next_top_at_mark_starts = tmp2;
1812 
1813   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1814   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1815   _next_top_at_mark_starts_base = tmp3;
1816 }
1817 
1818 
1819 void ShenandoahHeap::stop_concurrent_marking() {
1820   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
1821   if (! cancelled_concgc()) {
    // Marking completed without cancellation: any reference updating piggy-backed on marking
    // is done as well, so drop the forwarded-objects flag and publish the new marking bitmap.
1824     set_has_forwarded_objects(false);
1825     swap_mark_bitmaps();
1826   }
1827   set_concurrent_mark_in_progress(false);
1828 
1829   LogTarget(Trace, gc, region) lt;
1830   if (lt.is_enabled()) {
1831     ResourceMark rm;
1832     LogStream ls(lt);
1833     ls.print_cr("Regions at stopping the concurrent mark:");
1834     print_heap_regions_on(&ls);
1835   }
1836 }
1837 
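// The global gc state is mirrored into each Java thread's thread-local data, so that
// mutator barriers can check the current state without touching shared memory.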
1838 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1839   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1840     ShenandoahThreadLocalData::set_gc_state(t, state);
1841   }
1842 }
1843 
1844 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1845   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1846   _gc_state.set_cond(mask, value);
1847   set_gc_state_all_threads(_gc_state.raw_value());
1848 }
1849 
1850 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1851   set_gc_state_mask(MARKING, in_progress);
1852   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1853 }
1854 
1855 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
  set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1858 }
1859 
1860 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1861   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1862   set_gc_state_mask(EVACUATION, in_progress);
1863 }
1864 
1865 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1866   // Initialize Brooks pointer for the next object
1867   HeapWord* result = obj + BrooksPointer::word_size();
1868   BrooksPointer::initialize(oop(result));
1869   return result;
1870 }
1871 
1872 uint ShenandoahHeap::oop_extra_words() {
1873   return BrooksPointer::word_size();
1874 }
1875 
1876 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1877   _heap(ShenandoahHeap::heap_no_check()) {
1878 }
1879 
1880 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1881   _heap(ShenandoahHeap::heap_no_check()) {
1882 }
1883 
1884 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1885   if (CompressedOops::is_null(obj)) {
1886     return false;
1887   }
1888   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_mark_in_progress() || _heap->is_concurrent_traversal_in_progress());
1890   return _heap->is_marked_next(obj);
1891 }
1892 
1893 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1894   if (CompressedOops::is_null(obj)) {
1895     return false;
1896   }
1897   shenandoah_assert_not_forwarded(NULL, obj);
1898   return _heap->is_marked_next(obj);
1899 }
1900 
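// Pick the liveness closure matching the current heap state: when forwarded objects may
// exist, the check has to resolve forwarding before consulting the mark bitmap.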
1901 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
1902   return has_forwarded_objects() ?
1903          (BoolObjectClosure*) &_forwarded_is_alive :
1904          (BoolObjectClosure*) &_is_alive;
1905 }
1906 
1907 void ShenandoahHeap::ref_processing_init() {
1908   MemRegion mr = reserved_region();
1909 
1910   _forwarded_is_alive.init(this);
1911   _is_alive.init(this);
1912   assert(_max_workers > 0, "Sanity");
1913 
1914   _ref_processor =
1915     new ReferenceProcessor(mr,    // span
1916                            ParallelRefProcEnabled,  // MT processing
1917                            _max_workers,            // Degree of MT processing
1918                            true,                    // MT discovery
1919                            _max_workers,            // Degree of MT discovery
1920                            false,                   // Reference discovery is not atomic
1921                            NULL);                   // No closure, should be installed before use
1922 
1923   shenandoah_assert_rp_isalive_not_installed();
1924 }
1925 
1926 
1927 GCTracer* ShenandoahHeap::tracer() {
1928   return shenandoahPolicy()->tracer();
1929 }
1930 
1931 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1932   return _free_set->used();
1933 }
1934 
1935 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
1936   if (try_cancel_concgc()) {
1937     FormatBuffer<> msg("Cancelling concurrent GC: %s", GCCause::to_string(cause));
1938     log_info(gc)("%s", msg.buffer());
1939     Events::log(Thread::current(), "%s", msg.buffer());
1940   }
1941 }
1942 
1943 uint ShenandoahHeap::max_workers() {
1944   return _max_workers;
1945 }
1946 
1947 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even when GC is running.
1949 
1950   // Step 0. Notify policy to disable event recording.
1951   _shenandoah_policy->record_shutdown();
1952 
1953   // Step 1. Notify control thread that we are in shutdown.
1954   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1955   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1956   control_thread()->prepare_for_graceful_shutdown();
1957 
1958   // Step 2. Notify GC workers that we are cancelling GC.
1959   cancel_concgc(GCCause::_shenandoah_stop_vm);
1960 
1961   // Step 3. Wait until GC worker exits normally.
1962   control_thread()->stop();
1963 
1964   // Step 4. Stop String Dedup thread if it is active
1965   if (ShenandoahStringDedup::is_enabled()) {
1966     ShenandoahStringDedup::stop();
1967   }
1968 }
1969 
1970 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1971   ShenandoahPhaseTimings::Phase phase_root =
1972           full_gc ?
1973           ShenandoahPhaseTimings::full_gc_purge :
1974           ShenandoahPhaseTimings::purge;
1975 
1976   ShenandoahPhaseTimings::Phase phase_unload =
1977           full_gc ?
1978           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1979           ShenandoahPhaseTimings::purge_class_unload;
1980 
1981   ShenandoahPhaseTimings::Phase phase_cldg =
1982           full_gc ?
1983           ShenandoahPhaseTimings::full_gc_purge_cldg :
1984           ShenandoahPhaseTimings::purge_cldg;
1985 
1986   ShenandoahPhaseTimings::Phase phase_par =
1987           full_gc ?
1988           ShenandoahPhaseTimings::full_gc_purge_par :
1989           ShenandoahPhaseTimings::purge_par;
1990 
1991   ShenandoahPhaseTimings::Phase phase_par_classes =
1992           full_gc ?
1993           ShenandoahPhaseTimings::full_gc_purge_par_classes :
1994           ShenandoahPhaseTimings::purge_par_classes;
1995 
1996   ShenandoahPhaseTimings::Phase phase_par_codecache =
1997           full_gc ?
1998           ShenandoahPhaseTimings::full_gc_purge_par_codecache :
1999           ShenandoahPhaseTimings::purge_par_codecache;
2000 
2001   ShenandoahPhaseTimings::Phase phase_par_rmt =
2002           full_gc ?
2003           ShenandoahPhaseTimings::full_gc_purge_par_rmt :
2004           ShenandoahPhaseTimings::purge_par_rmt;
2005 
2006   ShenandoahPhaseTimings::Phase phase_par_symbstring =
2007           full_gc ?
2008           ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
2009           ShenandoahPhaseTimings::purge_par_symbstring;
2010 
2011   ShenandoahPhaseTimings::Phase phase_par_sync =
2012           full_gc ?
2013           ShenandoahPhaseTimings::full_gc_purge_par_sync :
2014           ShenandoahPhaseTimings::purge_par_sync;
2015 
2016   ShenandoahGCPhase root_phase(phase_root);
2017 
2018   BoolObjectClosure* is_alive = is_alive_closure();
2019 
2020   bool purged_class;
2021 
2022   // Unload classes and purge SystemDictionary.
2023   {
2024     ShenandoahGCPhase phase(phase_unload);
2025     purged_class = SystemDictionary::do_unloading(is_alive,
2026                                                   gc_timer(),
2027                                                   false /* defer cleaning */);
2028   }
2029 
2030   {
2031     ShenandoahGCPhase phase(phase_par);
2032     uint active = _workers->active_workers();
2033     ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
2034     _workers->run_task(&unlink_task);
2035 
2036     ShenandoahPhaseTimings* p = phase_timings();
2037     ParallelCleaningTimes times = unlink_task.times();
2038 
2039     // "times" report total time, phase_tables_cc reports wall time. Divide total times
2040     // by active workers to get average time per worker, that would add up to wall time.
2041     p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
2042     p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
2043     p->record_phase_time(phase_par_rmt,        times.rmt_work_us() / active);
2044     p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
2045     p->record_phase_time(phase_par_sync,       times.sync_us() / active);
2046   }
2047 
2048   if (ShenandoahStringDedup::is_enabled()) {
2049     ShenandoahPhaseTimings::Phase phase_par_string_dedup =
2050             full_gc ?
2051             ShenandoahPhaseTimings::full_gc_purge_par_string_dedup :
2052             ShenandoahPhaseTimings::purge_par_string_dedup;
2053     ShenandoahGCPhase phase(phase_par_string_dedup);
2054     ShenandoahStringDedup::parallel_cleanup();
2055   }
2056 
2057 
2058   {
2059     ShenandoahGCPhase phase(phase_cldg);
2060     ClassLoaderDataGraph::purge();
2061   }
2062 }
2063 
2064 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2065   set_gc_state_mask(HAS_FORWARDED, cond);
2066 }
2067 
2068 void ShenandoahHeap::set_process_references(bool pr) {
2069   _process_references.set_cond(pr);
2070 }
2071 
2072 void ShenandoahHeap::set_unload_classes(bool uc) {
2073   _unload_classes.set_cond(uc);
2074 }
2075 
2076 bool ShenandoahHeap::process_references() const {
2077   return _process_references.is_set();
2078 }
2079 
2080 bool ShenandoahHeap::unload_classes() const {
2081   return _unload_classes.is_set();
2082 }
2083 
// FIXME: this should be in ShenandoahHeapRegionSet
2085 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2086   size_t region_idx = r->region_number() + 1;
2087   ShenandoahHeapRegion* next = get_region(region_idx);
2088   guarantee(next->region_number() == region_idx, "region number must match");
2089   while (next->is_humongous()) {
2090     region_idx = next->region_number() + 1;
2091     next = get_region(region_idx);
2092     guarantee(next->region_number() == region_idx, "region number must match");
2093   }
2094   return next;
2095 }
2096 
2097 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2098   return _monitoring_support;
2099 }
2100 
2101 MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
2102   return _complete_mark_bit_map;
2103 }
2104 
2105 MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
2106   return _next_mark_bit_map;
2107 }
2108 
2109 address ShenandoahHeap::in_cset_fast_test_addr() {
2110   ShenandoahHeap* heap = ShenandoahHeap::heap();
2111   assert(heap->collection_set() != NULL, "Sanity");
2112   return (address) heap->collection_set()->biased_map_address();
2113 }
2114 
2115 address ShenandoahHeap::cancelled_concgc_addr() {
2116   return (address) ShenandoahHeap::heap()->_cancelled_concgc.addr_of();
2117 }
2118 
2119 address ShenandoahHeap::gc_state_addr() {
2120   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2121 }
2122 
2123 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2124   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2125 }
2126 
2127 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2128   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2129 }
2130 
2131 ShenandoahPacer* ShenandoahHeap::pacer() const {
2132   assert (_pacer != NULL, "sanity");
2133   return _pacer;
2134 }
2135 
2136 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2137   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2138   _next_top_at_mark_starts[index] = addr;
2139 }
2140 
2141 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2142   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2143   return _next_top_at_mark_starts[index];
2144 }
2145 
2146 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2147   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2148   _complete_top_at_mark_starts[index] = addr;
2149 }
2150 
2151 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2152   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2153   return _complete_top_at_mark_starts[index];
2154 }
2155 
2156 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2157   _degenerated_gc_in_progress.set_cond(in_progress);
2158 }
2159 
2160 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2161   _full_gc_in_progress.set_cond(in_progress);
2162 }
2163 
2164 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2165   assert (is_full_gc_in_progress(), "should be");
2166   _full_gc_move_in_progress.set_cond(in_progress);
2167 }
2168 
2169 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2170   set_gc_state_mask(UPDATEREFS, in_progress);
2171 }
2172 
2173 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2174   ShenandoahCodeRoots::add_nmethod(nm);
2175 }
2176 
2177 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2178   ShenandoahCodeRoots::remove_nmethod(nm);
2179 }
2180 
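// Pinned regions are excluded from the collection set, so objects in them are not
// evacuated while pinned (used, e.g., for JNI critical sections).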
2181 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2182   o = BarrierSet::barrier_set()->write_barrier(o);
2183   ShenandoahHeapLocker locker(lock());
2184   heap_region_containing(o)->make_pinned();
2185   return o;
2186 }
2187 
2188 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2189   o = BarrierSet::barrier_set()->read_barrier(o);
2190   ShenandoahHeapLocker locker(lock());
2191   heap_region_containing(o)->make_unpinned();
2192 }
2193 
2194 GCTimer* ShenandoahHeap::gc_timer() const {
2195   return _gc_timer;
2196 }
2197 
2198 #ifdef ASSERT
2199 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2200   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2201 
2202   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2203     if (UseDynamicNumberOfGCThreads ||
2204         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2205       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2206     } else {
2207       // Use ParallelGCThreads inside safepoints
      assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2209     }
2210   } else {
2211     if (UseDynamicNumberOfGCThreads ||
2212         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2213       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2214     } else {
2215       // Use ConcGCThreads outside safepoints
2216       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2217     }
2218   }
2219 }
2220 #endif
2221 
2222 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
2223   return _connection_matrix;
2224 }
2225 
2226 ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() {
2227   return _traversal_gc;
2228 }
2229 
2230 ShenandoahVerifier* ShenandoahHeap::verifier() {
2231   guarantee(ShenandoahVerify, "Should be enabled");
2232   assert (_verifier != NULL, "sanity");
2233   return _verifier;
2234 }
2235 
2236 template<class T>
2237 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2238 private:
2239   T cl;
2240   ShenandoahHeap* _heap;
2241   ShenandoahRegionIterator* _regions;
2242   bool _concurrent;
2243 public:
2244   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2245     AbstractGangTask("Concurrent Update References Task"),
2246     cl(T()),
2247     _heap(ShenandoahHeap::heap()),
2248     _regions(regions),
2249     _concurrent(concurrent) {
2250   }
2251 
2252   void work(uint worker_id) {
2253     SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
2254     ShenandoahHeapRegion* r = _regions->next();
2255     while (r != NULL) {
2256       if (_heap->in_collection_set(r)) {
2257         HeapWord* bottom = r->bottom();
2258         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2259         if (top > bottom) {
2260           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2261         }
2262       } else {
2263         if (r->is_active()) {
2264           _heap->marked_object_oop_safe_iterate(r, &cl);
2265           if (ShenandoahPacing) {
2266             _heap->pacer()->report_updaterefs(r->get_live_data_words());
2267           }
2268         }
2269       }
2270       if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
2271         return;
2272       }
2273       r = _regions->next();
2274     }
2275   }
2276 };
2277 
2278 void ShenandoahHeap::update_heap_references(bool concurrent) {
2279   if (UseShenandoahMatrix) {
2280     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(&_update_refs_iterator, concurrent);
2281     workers()->run_task(&task);
2282   } else {
2283     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2284     workers()->run_task(&task);
2285   }
2286 }
2287 
2288 void ShenandoahHeap::op_init_updaterefs() {
2289   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2290 
2291   if (ShenandoahVerify) {
2292     verifier()->verify_before_updaterefs();
2293   }
2294 
2295   set_evacuation_in_progress(false);
2296   set_update_refs_in_progress(true);
2297   make_tlabs_parsable(true);
2298   if (UseShenandoahMatrix) {
2299     connection_matrix()->clear_all();
2300   }
  for (size_t i = 0; i < num_regions(); i++) {
2302     ShenandoahHeapRegion* r = get_region(i);
2303     r->set_concurrent_iteration_safe_limit(r->top());
2304   }
2305 
2306   // Reset iterator.
2307   _update_refs_iterator = ShenandoahRegionIterator();
2308 
2309   if (ShenandoahPacing) {
2310     pacer()->setup_for_updaterefs();
2311   }
2312 }
2313 
2314 void ShenandoahHeap::op_final_updaterefs() {
2315   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2316 
2317   // Check if there is left-over work, and finish it
2318   if (_update_refs_iterator.has_next()) {
2319     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2320 
2321     // Finish updating references where we left off.
2322     clear_cancelled_concgc();
2323     update_heap_references(false);
2324   }
2325 
2326   // Clear cancelled conc GC, if set. On cancellation path, the block before would handle
2327   // everything. On degenerated paths, cancelled gc would not be set anyway.
2328   if (cancelled_concgc()) {
2329     clear_cancelled_concgc();
2330   }
2331   assert(!cancelled_concgc(), "Should have been done right before");
2332 
2333   concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2334 
2335   // Allocations might have happened before we STWed here, record peak:
2336   shenandoahPolicy()->record_peak_occupancy();
2337 
2338   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2339 
2340   trash_cset_regions();
2341   set_has_forwarded_objects(false);
2342 
2343   if (ShenandoahVerify) {
2344     verifier()->verify_after_updaterefs();
2345   }
2346 
2347   {
2348     ShenandoahHeapLocker locker(lock());
2349     _free_set->rebuild();
2350   }
2351 
2352   set_update_refs_in_progress(false);
2353 }
2354 
2355 void ShenandoahHeap::set_alloc_seq_gc_start() {
2356   // Take next number, the start seq number is inclusive
2357   _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::seqnum_current_alloc() + 1;
2358 }
2359 
2360 void ShenandoahHeap::set_alloc_seq_gc_end() {
2361   // Take current number, the end seq number is also inclusive
2362   _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::seqnum_current_alloc();
2363 }
2364 
2365 
2366 #ifdef ASSERT
2367 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2368   _lock.assert_owned_by_current_thread();
2369 }
2370 
2371 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2372   _lock.assert_not_owned_by_current_thread();
2373 }
2374 
2375 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2376   _lock.assert_owned_by_current_thread_or_safepoint();
2377 }
2378 #endif
2379 
2380 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2381   print_on(st);
2382   print_heap_regions_on(st);
2383 }
2384 
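// Marking bitmaps are committed in slices, each covering _bitmap_regions_per_slice heap
// regions. A slice has to stay committed as long as any region it covers is committed;
// the helpers below implement that accounting.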
2385 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2386   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2387 
2388   size_t regions_from = _bitmap_regions_per_slice * slice;
2389   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2390   for (size_t g = regions_from; g < regions_to; g++) {
2391     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2392     if (skip_self && g == r->region_number()) continue;
2393     if (get_region(g)->is_committed()) {
2394       return true;
2395     }
2396   }
2397   return false;
2398 }
2399 
2400 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2401   assert_heaplock_owned_by_current_thread();
2402 
2403   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, so we exit right away.
2406     return true;
2407   }
2408 
2409   // Commit the bitmap slice:
2410   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2411   size_t off = _bitmap_bytes_per_slice * slice;
2412   size_t len = _bitmap_bytes_per_slice;
2413   if (!os::commit_memory((char*)_bitmap0_region.start() + off, len, false)) {
2414     return false;
2415   }
2416   if (!os::commit_memory((char*)_bitmap1_region.start() + off, len, false)) {
2417     return false;
2418   }
2419   return true;
2420 }
2421 
2422 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2423   assert_heaplock_owned_by_current_thread();
2424 
2425   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2428     return true;
2429   }
2430 
2431   // Uncommit the bitmap slice:
2432   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2433   size_t off = _bitmap_bytes_per_slice * slice;
2434   size_t len = _bitmap_bytes_per_slice;
2435   if (!os::uncommit_memory((char*)_bitmap0_region.start() + off, len)) {
2436     return false;
2437   }
2438   if (!os::uncommit_memory((char*)_bitmap1_region.start() + off, len)) {
2439     return false;
2440   }
2441   return true;
2442 }
2443 
2444 bool ShenandoahHeap::idle_bitmap_slice(ShenandoahHeapRegion *r) {
2445   assert_heaplock_owned_by_current_thread();
2446   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2447 
2448   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2451     return true;
2452   }
2453 
2454   // Idle the bitmap slice:
2455   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2456   size_t off = _bitmap_bytes_per_slice * slice;
2457   size_t len = _bitmap_bytes_per_slice;
2458   if (!os::idle_memory((char*)_bitmap0_region.start() + off, len)) {
2459     return false;
2460   }
2461   if (!os::idle_memory((char*)_bitmap1_region.start() + off, len)) {
2462     return false;
2463   }
2464   return true;
2465 }
2466 
2467 void ShenandoahHeap::activate_bitmap_slice(ShenandoahHeapRegion* r) {
2468   assert_heaplock_owned_by_current_thread();
2469   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2470   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2471   size_t off = _bitmap_bytes_per_slice * slice;
2472   size_t len = _bitmap_bytes_per_slice;
2473   os::activate_memory((char*)_bitmap0_region.start() + off, len);
2474   os::activate_memory((char*)_bitmap1_region.start() + off, len);
2475 }
2476 
2477 void ShenandoahHeap::safepoint_synchronize_begin() {
2478   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2479     SuspendibleThreadSet::synchronize();
2480   }
2481 }
2482 
2483 void ShenandoahHeap::safepoint_synchronize_end() {
2484   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2485     SuspendibleThreadSet::desynchronize();
2486   }
2487 }
2488 
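// GC pauses are layered: the vmop_entry_* methods schedule the VM operation, the
// corresponding entry_* methods run under the safepoint and set up timing, logging and
// workers, and the op_* methods above do the actual work.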
2489 void ShenandoahHeap::vmop_entry_init_mark() {
2490   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2491   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2492   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2493 
2494   try_inject_alloc_failure();
2495   VM_ShenandoahInitMark op;
2496   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2497 }
2498 
2499 void ShenandoahHeap::vmop_entry_final_mark() {
2500   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2501   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2502   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2503 
2504   try_inject_alloc_failure();
2505   VM_ShenandoahFinalMarkStartEvac op;
2506   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2507 }
2508 
2509 void ShenandoahHeap::vmop_entry_final_evac() {
2510   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2511   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2512   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2513 
2514   VM_ShenandoahFinalEvac op;
2515   VMThread::execute(&op); // jump to entry_final_evac under safepoint
2516 }
2517 
2518 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2519   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2520   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2521   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2522 
2523   try_inject_alloc_failure();
2524   VM_ShenandoahInitUpdateRefs op;
2525   VMThread::execute(&op);
2526 }
2527 
2528 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2529   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2530   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2531   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2532 
2533   try_inject_alloc_failure();
2534   VM_ShenandoahFinalUpdateRefs op;
2535   VMThread::execute(&op);
2536 }
2537 
2538 void ShenandoahHeap::vmop_entry_init_traversal() {
2539   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2540   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2541   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2542 
2543   try_inject_alloc_failure();
2544   VM_ShenandoahInitTraversalGC op;
2545   VMThread::execute(&op);
2546 }
2547 
2548 void ShenandoahHeap::vmop_entry_final_traversal() {
2549   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2550   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2551   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2552 
2553   try_inject_alloc_failure();
2554   VM_ShenandoahFinalTraversalGC op;
2555   VMThread::execute(&op);
2556 }
2557 
2558 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2559   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2560   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2561   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2562 
2563   try_inject_alloc_failure();
2564   VM_ShenandoahFullGC op(cause);
2565   VMThread::execute(&op);
2566 }
2567 
2568 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2569   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2570   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2571   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2572 
2573   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2574   VMThread::execute(&degenerated_gc);
2575 }
2576 
2577 void ShenandoahHeap::entry_init_mark() {
2578   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2579   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2580 
2581   FormatBuffer<> msg("Pause Init Mark%s%s%s",
2582                      has_forwarded_objects() ? " (update refs)"    : "",
2583                      process_references() ?    " (process refs)"   : "",
2584                      unload_classes() ?        " (unload classes)" : "");
2585   GCTraceTime(Info, gc) time(msg, gc_timer());
2586   EventMark em("%s", msg.buffer());
2587 
2588   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_init_marking());
2589 
2590   op_init_mark();
2591 }
2592 
2593 void ShenandoahHeap::entry_final_mark() {
2594   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2595   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2596 
2597   FormatBuffer<> msg("Pause Final Mark%s%s%s",
2598                      has_forwarded_objects() ? " (update refs)"    : "",
2599                      process_references() ?    " (process refs)"   : "",
2600                      unload_classes() ?        " (unload classes)" : "");
2601   GCTraceTime(Info, gc) time(msg, gc_timer());
2602   EventMark em("%s", msg.buffer());
2603 
2604   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_marking());
2605 
2606   op_final_mark();
2607 }
2608 
2609 void ShenandoahHeap::entry_final_evac() {
2610   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2611   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2612 
2613   FormatBuffer<> msg("Pause Final Evac");
2614   GCTraceTime(Info, gc) time(msg, gc_timer());
2615   EventMark em("%s", msg.buffer());
2616 
2617   op_final_evac();
2618 }
2619 
2620 void ShenandoahHeap::entry_init_updaterefs() {
2621   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2622   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2623 
2624   static const char* msg = "Pause Init Update Refs";
2625   GCTraceTime(Info, gc) time(msg, gc_timer());
2626   EventMark em("%s", msg);
2627 
2628   // No workers used in this phase, no setup required
2629 
2630   op_init_updaterefs();
2631 }
2632 
2633 void ShenandoahHeap::entry_final_updaterefs() {
2634   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2635   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2636 
2637   static const char* msg = "Pause Final Update Refs";
2638   GCTraceTime(Info, gc) time(msg, gc_timer());
2639   EventMark em("%s", msg);
2640 
2641   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_update_ref());
2642 
2643   op_final_updaterefs();
2644 }
2645 
2646 void ShenandoahHeap::entry_init_traversal() {
2647   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2648   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2649 
2650   static const char* msg = "Pause Init Traversal";
2651   GCTraceTime(Info, gc) time(msg, gc_timer());
2652   EventMark em("%s", msg);
2653 
2654   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2655 
2656   op_init_traversal();
2657 }
2658 
2659 void ShenandoahHeap::entry_final_traversal() {
2660   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2661   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2662 
2663   static const char* msg = "Pause Final Traversal";
2664   GCTraceTime(Info, gc) time(msg, gc_timer());
2665   EventMark em("%s", msg);
2666 
2667   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2668 
2669   op_final_traversal();
2670 }
2671 
2672 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2673   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2674   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2675 
2676   static const char* msg = "Pause Full";
2677   GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2678   EventMark em("%s", msg);
2679 
2680   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_fullgc());
2681 
2682   op_full(cause);
2683 }
2684 
2685 void ShenandoahHeap::entry_degenerated(int point) {
2686   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2687   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2688 
2689   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2690   FormatBuffer<> msg("Pause Degenerated GC (%s)", degen_point_to_string(dpoint));
2691   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2692   EventMark em("%s", msg.buffer());
2693 
2694   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated());
2695 
2696   set_degenerated_gc_in_progress(true);
2697   op_degenerated(dpoint);
2698   set_degenerated_gc_in_progress(false);
2699 }
2700 
2701 void ShenandoahHeap::entry_mark() {
2702   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2703 
2704   FormatBuffer<> msg("Concurrent marking%s%s%s",
2705                      has_forwarded_objects() ? " (update refs)"    : "",
2706                      process_references() ?    " (process refs)"   : "",
2707                      unload_classes() ?        " (unload classes)" : "");
2708   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2709   EventMark em("%s", msg.buffer());
2710 
2711   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_marking());
2712 
2713   try_inject_alloc_failure();
2714   op_mark();
2715 }
2716 
2717 void ShenandoahHeap::entry_evac() {
2718   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2719   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2720 
2721   static const char* msg = "Concurrent evacuation";
2722   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2723   EventMark em("%s", msg);
2724 
2725   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_evac());
2726 
2727   try_inject_alloc_failure();
2728   op_evac();
2729 }
2730 
2731 void ShenandoahHeap::entry_updaterefs() {
2732   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2733 
2734   static const char* msg = "Concurrent update references";
2735   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2736   EventMark em("%s", msg);
2737 
2738   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref());
2739 
2740   try_inject_alloc_failure();
2741   op_updaterefs();
2742 }
2743 void ShenandoahHeap::entry_cleanup() {
2744   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2745 
2746   static const char* msg = "Concurrent cleanup";
2747   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2748   EventMark em("%s", msg);
2749 
2750   // This phase does not use workers, no need for setup
2751 
2752   try_inject_alloc_failure();
2753   op_cleanup();
2754 }
2755 
2756 void ShenandoahHeap::entry_cleanup_traversal() {
2757   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2758 
2759   static const char* msg = "Concurrent cleanup";
2760   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2761   EventMark em("%s", msg);
2762 
2763   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2764 
2765   try_inject_alloc_failure();
2766   op_cleanup_traversal();
2767 }
2768 
2769 void ShenandoahHeap::entry_cleanup_bitmaps() {
2770   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2771 
2772   static const char* msg = "Concurrent cleanup";
2773   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2774   EventMark em("%s", msg);
2775 
2776   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup());
2777 
2778   try_inject_alloc_failure();
2779   op_cleanup_bitmaps();
2780 }
2781 
2782 void ShenandoahHeap::entry_preclean() {
2783   if (ShenandoahPreclean && process_references()) {
2784     static const char* msg = "Concurrent precleaning";
2785     GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2786     EventMark em("%s", msg);
2787 
2788     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2789 
2790     ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_preclean());
2791 
2792     try_inject_alloc_failure();
2793     op_preclean();
2794   }
2795 }
2796 
2797 void ShenandoahHeap::entry_traversal() {
2798   static const char* msg = "Concurrent traversal";
2799   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2800   EventMark em("%s", msg);
2801 
2802   TraceCollectorStats tcs(is_minor_gc() ? monitoring_support()->partial_collection_counters()
2803                                         : monitoring_support()->concurrent_collection_counters());
2804 
2805   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2806 
2807   try_inject_alloc_failure();
2808   op_traversal();
2809 }
2810 
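// Test support: with ShenandoahAllocFailureALot, roughly 5% of calls set the injection flag,
// then check (after a short sleep) whether the concurrent GC was cancelled as a result.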
2811 void ShenandoahHeap::try_inject_alloc_failure() {
2812   if (ShenandoahAllocFailureALot && !cancelled_concgc() && ((os::random() % 1000) > 950)) {
2813     _inject_alloc_failure.set();
2814     os::naked_short_sleep(1);
2815     if (cancelled_concgc()) {
2816       log_info(gc)("Allocation failure was successfully injected");
2817     }
2818   }
2819 }
2820 
2821 bool ShenandoahHeap::should_inject_alloc_failure() {
2822   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2823 }
2824 
2825 void ShenandoahHeap::initialize_serviceability() {
2826   _memory_pool = new ShenandoahMemoryPool(this);
2827   _cycle_memory_manager.add_pool(_memory_pool);
2828   _stw_memory_manager.add_pool(_memory_pool);
2829 }
2830 
2831 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2832   GrowableArray<GCMemoryManager*> memory_managers(2);
2833   memory_managers.append(&_cycle_memory_manager);
2834   memory_managers.append(&_stw_memory_manager);
2835   return memory_managers;
2836 }
2837 
2838 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2839   GrowableArray<MemoryPool*> memory_pools(1);
2840   memory_pools.append(_memory_pool);
2841   return memory_pools;
2842 }
2843 
2844 void ShenandoahHeap::enter_evacuation() {
2845   _oom_evac_handler.enter_evacuation();
2846 }
2847 
2848 void ShenandoahHeap::leave_evacuation() {
2849   _oom_evac_handler.leave_evacuation();
2850 }
2851 
2852 SoftRefPolicy* ShenandoahHeap::soft_ref_policy() {
2853   return &_soft_ref_policy;
2854 }
2855 
2856 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2857   _index(0),
2858   _heap(ShenandoahHeap::heap()) {}
2859 
2860 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2861   _index(0),
2862   _heap(heap) {}
2863 
2864 ShenandoahRegionIterator& ShenandoahRegionIterator::operator=(const ShenandoahRegionIterator& o) {
2865   _index = o._index;
2866   assert(_heap == o._heap, "must be same");
2867   return *this;
2868 }
2869 
2870 bool ShenandoahRegionIterator::has_next() const {
2871   return _index < _heap->num_regions();
2872 }
2873 
2874 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure& cl) const {
2875   ShenandoahRegionIterator regions;
2876   ShenandoahHeapRegion* r = regions.next();
2877   while (r != NULL) {
2878     if (cl.heap_region_do(r)) {
2879       break;
2880     }
2881     r = regions.next();
2882   }
2883 }
2884 
2885 bool ShenandoahHeap::is_minor_gc() const {
2886   return _gc_cycle_mode.get() == MINOR;
2887 }
2888 
2889 bool ShenandoahHeap::is_major_gc() const {
2890   return _gc_cycle_mode.get() == MAJOR;
2891 }
2892 
2893 void ShenandoahHeap::set_cycle_mode(GCCycleMode gc_cycle_mode) {
2894   _gc_cycle_mode.set(gc_cycle_mode);
2895 }
2896 
2897 char ShenandoahHeap::gc_state() const {
2898   return _gc_state.raw_value();
2899 }