1 /*
   2  * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/parallelCleaning.hpp"
  30 
  31 #include "gc/shenandoah/brooksPointer.hpp"
  32 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
  39 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  44 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  45 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  46 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  47 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  48 #include "gc/shenandoah/shenandoahPacer.hpp"
  49 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  50 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  51 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  52 #include "gc/shenandoah/shenandoahUtils.hpp"
  53 #include "gc/shenandoah/shenandoahVerifier.hpp"
  54 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  55 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  56 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  57 
  58 #include "runtime/vmThread.hpp"
  59 #include "services/mallocTracker.hpp"
  60 
  61 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  62 
  63 #ifdef ASSERT
  64 template <class T>
  65 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  66   T o = RawAccess<>::oop_load(p);
  67   if (! CompressedOops::is_null(o)) {
  68     oop obj = CompressedOops::decode_not_null(o);
  69     shenandoah_assert_not_forwarded(p, obj);
  70   }
  71 }
  72 
  73 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
  74 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
  75 #endif
  76 
  77 const char* ShenandoahHeap::name() const {
  78   return "Shenandoah";
  79 }
  80 
  81 class ShenandoahPretouchTask : public AbstractGangTask {
  82 private:
  83   ShenandoahRegionIterator _regions;
  84   const size_t _bitmap_size;
  85   const size_t _page_size;
  86   char* _bitmap0_base;
  87   char* _bitmap1_base;
  88 public:
  ShenandoahPretouchTask(ShenandoahRegionIterator regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch"),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {}
  98 
  99   virtual void work(uint worker_id) {
 100     ShenandoahHeapRegion* r = _regions.next();
 101     while (r != NULL) {
 102       log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 103                           r->region_number(), p2i(r->bottom()), p2i(r->end()));
 104       os::pretouch_memory(r->bottom(), r->end(), _page_size);
 105 
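      // Compute the byte range of the marking bitmap that covers this region:
      // heap_map_factor() is the ratio of heap bytes to bitmap bytes, so dividing the
      // region's byte offset and extent by it yields the matching bitmap slice.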
 106       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 107       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 109 
 110       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 111                           r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
 112       os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);
 113 
 114       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 115                           r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
 116       os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);
 117 
 118       r = _regions.next();
 119     }
 120   }
 121 };
 122 
 123 jint ShenandoahHeap::initialize() {
 124 
 125   BrooksPointer::initial_checks();
 126 
 127   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 128   size_t max_byte_size = collector_policy()->max_heap_byte_size();
 129   size_t heap_alignment = collector_policy()->heap_alignment();
 130 
 131   if (ShenandoahAlwaysPreTouch) {
 132     // Enabled pre-touch means the entire heap is committed right away.
 133     init_byte_size = max_byte_size;
 134   }
 135 
 136   Universe::check_alignment(max_byte_size,
 137                             ShenandoahHeapRegion::region_size_bytes(),
 138                             "shenandoah heap");
 139   Universe::check_alignment(init_byte_size,
 140                             ShenandoahHeapRegion::region_size_bytes(),
 141                             "shenandoah heap");
 142 
 143   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
 144                                                  heap_alignment);
 145   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 146 
 147   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 148   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
 149 
 150   _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
 151   size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
 152   _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
 153   _committed = _initial_size;
 154 
 155   log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
 156   if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
 157     vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
 158   }
 159 
 160   size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
 161   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 162 
 163   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 164   _free_set = new ShenandoahFreeSet(this, _num_regions);
 165 
 166   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
 167 
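  // The top-at-mark-start (TAMS) arrays are biased by (heap base >> region shift), so they
  // can be indexed directly with (addr >> region_size_bytes_shift()) without subtracting
  // the heap base on every lookup.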
 168   _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 169   _next_top_at_mark_starts = _next_top_at_mark_starts_base -
 170                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 171 
 172   _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 173   _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
 174                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 175 
 176   if (ShenandoahPacing) {
 177     _pacer = new ShenandoahPacer(this);
 178     _pacer->setup_for_idle();
 179   } else {
 180     _pacer = NULL;
 181   }
 182 
 183   {
 184     ShenandoahHeapLocker locker(lock());
 185     for (size_t i = 0; i < _num_regions; i++) {
 186       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
 187                                                          (HeapWord*) pgc_rs.base() + reg_size_words * i,
 188                                                          reg_size_words,
 189                                                          i,
 190                                                          i < num_committed_regions);
 191 
 192       _complete_top_at_mark_starts_base[i] = r->bottom();
 193       _next_top_at_mark_starts_base[i] = r->bottom();
 194       _regions[i] = r;
 195       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 196     }
 197 
 198     _free_set->rebuild();
 199   }
 200 
 201   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));
 203 
  // The call below uses SATB facilities that currently live in G1 code,
  // but probably belong in a shared location.
 206   ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 207                                                SATB_Q_FL_lock,
 208                                                20 /*G1SATBProcessCompletedThreshold */,
 209                                                Shared_SATB_Q_lock);
 210 
  // Reserve space for the complete and next marking bitmaps.
 212   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 213   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 214   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 215   _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
 216 
 217   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 218 
 219   guarantee(bitmap_bytes_per_region != 0,
 220             "Bitmap bytes per region should not be zero");
 221   guarantee(is_power_of_2(bitmap_bytes_per_region),
 222             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 223 
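  // Pick the bitmap commit granularity ("slice"): the larger of the page size and the
  // per-region bitmap footprint, so commits and uncommits stay page-aligned while still
  // being attributable to whole regions.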
 224   if (bitmap_page_size > bitmap_bytes_per_region) {
 225     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 226     _bitmap_bytes_per_slice = bitmap_page_size;
 227   } else {
 228     _bitmap_regions_per_slice = 1;
 229     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 230   }
 231 
 232   guarantee(_bitmap_regions_per_slice >= 1,
 233             "Should have at least one region per slice: " SIZE_FORMAT,
 234             _bitmap_regions_per_slice);
 235 
 236   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 237             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 238             _bitmap_bytes_per_slice, bitmap_page_size);
 239 
 240   ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
 241   MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
 242   _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
 243 
 244   ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
 245   MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
 246   _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
 247 
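  // Commit only the bitmap slices that cover the initially committed regions, rounded up
  // to whole slices and clamped at the full bitmap size.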
 248   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 249                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 250   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 251   os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
 252                             "couldn't allocate initial bitmap");
 253   os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
 254                             "couldn't allocate initial bitmap");
 255 
 256   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 257 
 258   if (ShenandoahVerify) {
 259     ReservedSpace verify_bitmap(_bitmap_size, page_size);
 260     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
 261                               "couldn't allocate verification bitmap");
 262     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 263     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 264     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 265     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 266   }
 267 
 268   if (ShenandoahAlwaysPreTouch) {
 269     assert (!AlwaysPreTouch, "Should have been overridden");
 270 
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
 274 
 275     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 276                        _num_regions, page_size);
 277     ShenandoahPretouchTask cl(region_iterator(), bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
 278     _workers->run_task(&cl);
 279   }
 280 
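  // Bitmap 0 starts out as the "complete" bitmap (results of the last finished marking) and
  // bitmap 1 as the "next" bitmap (the one built by the upcoming marking); the two are
  // expected to swap roles between cycles.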
 281   _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
 282   _complete_mark_bit_map = &_mark_bit_map0;
 283 
 284   _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
 285   _next_mark_bit_map = &_mark_bit_map1;
 286 
 287   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 288   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 289   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 290   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 291   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 292 
 293   if (UseShenandoahMatrix) {
 294     _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
 295   } else {
 296     _connection_matrix = NULL;
 297   }
 298 
 299   _traversal_gc = _shenandoah_policy->can_do_traversal_gc() ?
 300                 new ShenandoahTraversalGC(this, _num_regions) :
 301                 NULL;
 302 
 303   _monitoring_support = new ShenandoahMonitoringSupport(this);
 304 
 305   _phase_timings = new ShenandoahPhaseTimings();
 306 
 307   if (ShenandoahAllocationTrace) {
 308     _alloc_tracker = new ShenandoahAllocTracker();
 309   }
 310 
 311   ShenandoahStringDedup::initialize();
 312 
 313   _concurrent_gc_thread = new ShenandoahConcurrentThread();
 314 
 315   ShenandoahCodeRoots::initialize();
 316 
 317   LogTarget(Trace, gc, region) lt;
 318   if (lt.is_enabled()) {
 319     ResourceMark rm;
 320     LogStream ls(lt);
 321     log_trace(gc, region)("All Regions");
 322     print_heap_regions_on(&ls);
 323     log_trace(gc, region)("Free Regions");
 324     _free_set->print_on(&ls);
 325   }
 326 
 327   log_info(gc, init)("Safepointing mechanism: %s",
 328                      SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
 329                      (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
 330 
 331   return JNI_OK;
 332 }
 333 
 334 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 335   CollectedHeap(),
 336   _shenandoah_policy(policy),
 337   _soft_ref_policy(),
 338   _regions(NULL),
 339   _free_set(NULL),
 340   _collection_set(NULL),
 341   _update_refs_iterator(ShenandoahRegionIterator(this)),
 342   _bytes_allocated_since_gc_start(0),
 343   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 344   _ref_processor(NULL),
 345   _next_top_at_mark_starts(NULL),
 346   _next_top_at_mark_starts_base(NULL),
 347   _complete_top_at_mark_starts(NULL),
 348   _complete_top_at_mark_starts_base(NULL),
 349   _mark_bit_map0(),
 350   _mark_bit_map1(),
 351   _aux_bit_map(),
 352   _connection_matrix(NULL),
 353   _verifier(NULL),
 354   _pacer(NULL),
 355   _used_at_last_gc(0),
 356   _alloc_seq_at_last_gc_start(0),
 357   _alloc_seq_at_last_gc_end(0),
 358   _safepoint_workers(NULL),
 359   _gc_cycle_mode(),
 360 #ifdef ASSERT
 361   _heap_expansion_count(0),
 362 #endif
 363   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 364   _phase_timings(NULL),
 365   _alloc_tracker(NULL),
 366   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 367   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 368   _memory_pool(NULL)
 369 {
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
 372   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 373 
 374   _scm = new ShenandoahConcurrentMark();
 375   _full_gc = new ShenandoahMarkCompact();
 376   _used = 0;
 377 
 378   _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                                    /* are_GC_task_threads */ true,
                                    /* are_ConcurrentGC_threads */ false);
 382   if (_workers == NULL) {
 383     vm_exit_during_initialization("Failed necessary allocation.");
 384   } else {
 385     _workers->initialize_workers();
 386   }
 387 
 388   if (ParallelSafepointCleanupThreads > 1) {
 389     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 390                                                 ParallelSafepointCleanupThreads,
 391                                                 false, false);
 392     _safepoint_workers->initialize_workers();
 393   }
 394 }
 395 
 396 class ShenandoahResetNextBitmapTask : public AbstractGangTask {
 397 private:
 398   ShenandoahRegionIterator _regions;
 399 
 400 public:
 401   ShenandoahResetNextBitmapTask(ShenandoahRegionIterator regions) :
 402     AbstractGangTask("Parallel Reset Bitmap Task"),
 403     _regions(regions) {}
 404 
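  // Clear the next marking bitmap only up to TAMS; above TAMS the bitmap is expected to be
  // clear already, which the assert below verifies.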
 405   void work(uint worker_id) {
 406     ShenandoahHeapRegion* region = _regions.next();
 407     ShenandoahHeap* heap = ShenandoahHeap::heap();
 408     while (region != NULL) {
 409       if (heap->is_bitmap_slice_committed(region)) {
 410         HeapWord* bottom = region->bottom();
 411         HeapWord* top = heap->next_top_at_mark_start(region->bottom());
 412         if (top > bottom) {
 413           heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 414         }
 415         assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
 416       }
 417       region = _regions.next();
 418     }
 419   }
 420 };
 421 
 422 void ShenandoahHeap::reset_next_mark_bitmap() {
 423   assert_gc_workers(_workers->active_workers());
 424 
 425   ShenandoahResetNextBitmapTask task(region_iterator());
 426   _workers->run_task(&task);
 427 }
 428 
 429 class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask {
 430 private:
 431   ShenandoahRegionIterator _regions;
 432 
 433 public:
 434   ShenandoahResetNextBitmapTraversalTask(ShenandoahRegionIterator regions) :
 435     AbstractGangTask("Parallel Reset Bitmap Task for Traversal"),
 436     _regions(regions) {
 437   }
 438 
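  // For regions in the traversal set, promote the just-completed traversal marking: copy the
  // next bitmap into the complete bitmap, advance the complete TAMS to the next TAMS, then
  // reset the next bitmap and next TAMS for the upcoming cycle.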
 439   void work(uint worker_id) {
 440     ShenandoahHeap* heap = ShenandoahHeap::heap();
 441     ShenandoahHeapRegionSet* traversal_set = heap->traversal_gc()->traversal_set();
 442     ShenandoahHeapRegion* region = _regions.next();
 443     while (region != NULL) {
 444       if (heap->is_bitmap_slice_committed(region)) {
 445         if (traversal_set->is_in(region) && !region->is_trash()) {
 446           ShenandoahHeapLocker locker(heap->lock());
 447           HeapWord* bottom = region->bottom();
 448           HeapWord* top = heap->next_top_at_mark_start(bottom);
          assert(top <= region->top(),
                 "TAMS must be smaller than or equal to top: TAMS: " PTR_FORMAT ", top: " PTR_FORMAT,
                 p2i(top), p2i(region->top()));
 452           if (top > bottom) {
 453             heap->complete_mark_bit_map()->copy_from(heap->next_mark_bit_map(), MemRegion(bottom, top));
 454             heap->set_complete_top_at_mark_start(bottom, top);
 455             heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 456             heap->set_next_top_at_mark_start(bottom, bottom);
 457           }
 458         }
 459         assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()),
 460                "need clear next bitmap");
 461       }
 462       region = _regions.next();
 463     }
 464   }
 465 };
 466 
 467 void ShenandoahHeap::reset_next_mark_bitmap_traversal() {
 468   assert_gc_workers(_workers->active_workers());
 469 
 470   ShenandoahResetNextBitmapTraversalTask task(region_iterator());
 471   _workers->run_task(&task);
 472 }
 473 
 474 bool ShenandoahHeap::is_next_bitmap_clear() {
 475   for (size_t idx = 0; idx < _num_regions; idx++) {
 476     ShenandoahHeapRegion* r = get_region(idx);
 477     if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
 478       return false;
 479     }
 480   }
 481   return true;
 482 }
 483 
 484 bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 485   return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 486 }
 487 
 488 bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 489   return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 490 }
 491 
 492 void ShenandoahHeap::print_on(outputStream* st) const {
 493   st->print_cr("Shenandoah Heap");
 494   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 495                capacity() / K, committed() / K, used() / K);
 496   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
 497                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 498 
 499   st->print("Status: ");
 500   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 501   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 502   if (is_evacuation_in_progress())           st->print("evacuating, ");
 503   if (is_update_refs_in_progress())          st->print("updating refs, ");
 504   if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
 505   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 506   if (is_full_gc_in_progress())              st->print("full gc, ");
 507   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 508 
 509   if (cancelled_concgc()) {
 510     st->print("conc gc cancelled");
 511   } else {
 512     st->print("not cancelled");
 513   }
 514   st->cr();
 515 
 516   st->print_cr("Reserved region:");
 517   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 518                p2i(reserved_region().start()),
 519                p2i(reserved_region().end()));
 520 
 521   if (UseShenandoahMatrix) {
 522     st->print_cr("Matrix:");
 523 
 524     ShenandoahConnectionMatrix* matrix = connection_matrix();
 525     if (matrix != NULL) {
 526       st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
 527       st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
 528       st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
 529     } else {
 530       st->print_cr(" No matrix.");
 531     }
 532   }
 533 
 534   if (Verbose) {
 535     print_heap_regions_on(st);
 536   }
 537 }
 538 
 539 class ShenandoahInitGCLABClosure : public ThreadClosure {
 540 public:
 541   void do_thread(Thread* thread) {
 542     if (thread != NULL && (thread->is_Java_thread() || thread->is_Worker_thread() ||
 543                            thread->is_ConcurrentGC_thread())) {
 544       thread->gclab().initialize(true);
 545     }
 546   }
 547 };
 548 
 549 void ShenandoahHeap::post_initialize() {
 550   CollectedHeap::post_initialize();
 551   if (UseTLAB) {
 552     MutexLocker ml(Threads_lock);
 553 
 554     ShenandoahInitGCLABClosure init_gclabs;
 555     Threads::threads_do(&init_gclabs);
 556     gc_threads_do(&init_gclabs);
 557 
    // The gclab cannot be initialized early during VM startup, as it cannot determine its max_size.
    // Instead, we let the WorkGang initialize the gclab when a new worker is created.
 560     _workers->set_initialize_gclab();
 561   }
 562 
 563   _scm->initialize(_max_workers);
 564   _full_gc->initialize(_gc_timer);
 565 
 566   ref_processing_init();
 567 
 568   _shenandoah_policy->post_heap_initialize();
 569 }
 570 
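// _used is updated with atomic adds by allocating threads and read with acquire semantics;
// _committed only changes under the heap lock or at a safepoint (see the asserts below).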
 571 size_t ShenandoahHeap::used() const {
 572   return OrderAccess::load_acquire(&_used);
 573 }
 574 
 575 size_t ShenandoahHeap::committed() const {
 576   OrderAccess::acquire();
 577   return _committed;
 578 }
 579 
 580 void ShenandoahHeap::increase_committed(size_t bytes) {
 581   assert_heaplock_or_safepoint();
 582   _committed += bytes;
 583 }
 584 
 585 void ShenandoahHeap::decrease_committed(size_t bytes) {
 586   assert_heaplock_or_safepoint();
 587   _committed -= bytes;
 588 }
 589 
 590 void ShenandoahHeap::increase_used(size_t bytes) {
 591   Atomic::add(bytes, &_used);
 592 }
 593 
 594 void ShenandoahHeap::set_used(size_t bytes) {
 595   OrderAccess::release_store_fence(&_used, bytes);
 596 }
 597 
 598 void ShenandoahHeap::decrease_used(size_t bytes) {
 599   assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(bytes, &_used);
 601 }
 602 
 603 void ShenandoahHeap::increase_allocated(size_t bytes) {
 604   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 605 }
 606 
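// Record an allocation of 'words'. Wasted allocations still count toward bytes-allocated and
// the pacer budget, but not toward used().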
 607 void ShenandoahHeap::notify_alloc(size_t words, bool waste) {
 608   size_t bytes = words * HeapWordSize;
 609   if (!waste) {
 610     increase_used(bytes);
 611   }
 612   increase_allocated(bytes);
 613   if (ShenandoahPacing) {
 614     concurrent_thread()->pacing_notify_alloc(words);
 615     if (waste) {
 616       pacer()->claim_for_alloc(words, true);
 617     }
 618   }
 619 }
 620 
 621 size_t ShenandoahHeap::capacity() const {
 622   return num_regions() * ShenandoahHeapRegion::region_size_bytes();
 623 }
 624 
 625 bool ShenandoahHeap::is_maximal_no_gc() const {
 626   Unimplemented();
 627   return true;
 628 }
 629 
 630 size_t ShenandoahHeap::max_capacity() const {
 631   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 632 }
 633 
 634 size_t ShenandoahHeap::initial_capacity() const {
 635   return _initial_size;
 636 }
 637 
 638 bool ShenandoahHeap::is_in(const void* p) const {
 639   HeapWord* heap_base = (HeapWord*) base();
 640   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 641   return p >= heap_base && p < last_region_end;
 642 }
 643 
 644 bool ShenandoahHeap::is_scavengable(oop p) {
 645   return true;
 646 }
 647 
 648 void ShenandoahHeap::handle_heap_shrinkage(double shrink_before) {
 649   if (!ShenandoahUncommit) {
 650     return;
 651   }
 652 
 653   ShenandoahHeapLocker locker(lock());
 654 
 655   size_t count = 0;
 656   for (size_t i = 0; i < num_regions(); i++) {
 657     ShenandoahHeapRegion* r = get_region(i);
 658     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 659       r->make_uncommitted();
 660       count++;
 661     }
 662   }
 663 
 664   if (count > 0) {
 665     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 666                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 667     _concurrent_gc_thread->notify_heap_changed();
 668   }
 669 }
 670 
 671 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain the GCLAB and allocate the object in shared space if
  // the amount free in the GCLAB is too large to discard.
 674   if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
 675     thread->gclab().record_slow_allocation(size);
 676     return NULL;
 677   }
 678 
 679   // Discard gclab and allocate a new one.
 680   // To minimize fragmentation, the last GCLAB may be smaller than the rest.
 681   size_t new_gclab_size = thread->gclab().compute_size(size);
 682 
 683   thread->gclab().clear_before_allocation();
 684 
 685   if (new_gclab_size == 0) {
 686     return NULL;
 687   }
 688 
 689   // Allocate a new GCLAB...
 690   HeapWord* obj = allocate_new_gclab(new_gclab_size);
 691   if (obj == NULL) {
 692     return NULL;
 693   }
 694 
 695   if (ZeroTLAB) {
 696     // ..and clear it.
 697     Copy::zero_to_words(obj, new_gclab_size);
 698   } else {
 699     // ...and zap just allocated object.
 700 #ifdef ASSERT
 701     // Skip mangling the space corresponding to the object header to
 702     // ensure that the returned space is not considered parsable by
 703     // any concurrent GC thread.
 704     size_t hdr_size = oopDesc::header_size();
 705     Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
 706 #endif // ASSERT
 707   }
 708   thread->gclab().fill(obj, obj + size, new_gclab_size);
 709   return obj;
 710 }
 711 
 712 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
 713 #ifdef ASSERT
 714   log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 715 #endif
 716   return allocate_new_lab(word_size, _alloc_tlab);
 717 }
 718 
 719 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
 720 #ifdef ASSERT
 721   log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 722 #endif
 723   return allocate_new_lab(word_size, _alloc_gclab);
 724 }
 725 
 726 HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
 727   HeapWord* result = allocate_memory(word_size, type);
 728 
 729   if (result != NULL) {
 730     assert(! in_collection_set(result), "Never allocate in collection set");
 731 
    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
 733 
 734   }
 735   return result;
 736 }
 737 
 738 ShenandoahHeap* ShenandoahHeap::heap() {
 739   CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
 741   assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
 742   return (ShenandoahHeap*) heap;
 743 }
 744 
 745 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
 746   CollectedHeap* heap = Universe::heap();
 747   return (ShenandoahHeap*) heap;
 748 }
 749 
 750 HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, AllocType type) {
 751   ShenandoahAllocTrace trace_alloc(word_size, type);
 752 
 753   bool in_new_region = false;
 754   HeapWord* result = NULL;
 755 
 756   if (type == _alloc_tlab || type == _alloc_shared) {
 757     if (ShenandoahPacing) {
 758       pacer()->pace_for_alloc(word_size);
 759     }
 760 
 761     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 762       result = allocate_memory_under_lock(word_size, type, in_new_region);
 763     }
 764 
 765     // Allocation failed, try full-GC, then retry allocation.
 766     //
 767     // It might happen that one of the threads requesting allocation would unblock
 768     // way later after full-GC happened, only to fail the second allocation, because
 769     // other threads have already depleted the free storage. In this case, a better
 770     // strategy would be to try full-GC again.
 771     //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
 774     // TODO: Poll if Full GC made enough progress to warrant retry.
 775     int tries = 0;
 776     while ((result == NULL) && (tries++ < ShenandoahAllocGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
 779       concurrent_thread()->handle_alloc_failure(word_size);
 780       result = allocate_memory_under_lock(word_size, type, in_new_region);
 781     }
 782   } else {
 783     assert(type == _alloc_gclab || type == _alloc_shared_gc, "Can only accept these types here");
 784     result = allocate_memory_under_lock(word_size, type, in_new_region);
 785     // Do not call handle_alloc_failure() here, because we cannot block.
 786     // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
 787   }
 788 
 789   if (in_new_region) {
 790     concurrent_thread()->notify_heap_changed();
 791   }
 792 
  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());
 795 
 796   if (result != NULL) {
 797     notify_alloc(word_size, false);
 798   }
 799 
 800   return result;
 801 }
 802 
 803 HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, AllocType type, bool& in_new_region) {
 804   ShenandoahHeapLocker locker(lock());
 805   return _free_set->allocate(word_size, type, in_new_region);
 806 }
 807 
 808 HeapWord*  ShenandoahHeap::mem_allocate(size_t size,
 809                                         bool*  gc_overhead_limit_was_exceeded) {
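  // Shared (non-LAB) allocation: each Shenandoah object is preceded by a Brooks forwarding
  // pointer word, so we allocate one extra word and return the address just past it, after
  // initializing the forwarding pointer to point at the object itself.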
 810   HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _alloc_shared);
 811   HeapWord* result = filler + BrooksPointer::word_size();
 812   if (filler != NULL) {
 813     BrooksPointer::initialize(oop(result));
 814 
    assert(! in_collection_set(result), "never allocate in targeted region");
 816     return result;
 817   } else {
 818     return NULL;
 819   }
 820 }
 821 
 822 class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
 823 private:
 824   ShenandoahHeap* _heap;
 825   Thread* _thread;
 826 public:
 827   ShenandoahEvacuateUpdateRootsClosure() :
 828     _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 829   }
 830 
 831 private:
 832   template <class T>
 833   void do_oop_work(T* p) {
 834     assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
 835 
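    // Evacuate-and-update: if the referenced object is still in the collection set and has
    // not been forwarded yet, evacuate it here; in either case store the to-space address
    // back into the root slot.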
 836     T o = RawAccess<>::oop_load(p);
 837     if (! CompressedOops::is_null(o)) {
 838       oop obj = CompressedOops::decode_not_null(o);
 839       if (_heap->in_collection_set(obj)) {
 840         shenandoah_assert_marked_complete(p, obj);
 841         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 842         if (oopDesc::unsafe_equals(resolved, obj)) {
 843           resolved = _heap->evacuate_object(obj, _thread);
 844         }
 845         RawAccess<OOP_NOT_NULL>::oop_store(p, resolved);
 846       }
 847     }
 848   }
 849 
 850 public:
 851   void do_oop(oop* p) {
 852     do_oop_work(p);
 853   }
 854   void do_oop(narrowOop* p) {
 855     do_oop_work(p);
 856   }
 857 };
 858 
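// Unlike the closure above, this one only evacuates the referenced objects and deliberately
// leaves the root slots untouched; it is used where updating the slot in place would not be
// safe, e.g. for concurrently scanned code roots (see ShenandoahParallelEvacuationTask).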
 859 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
 860 private:
 861   ShenandoahHeap* _heap;
 862   Thread* _thread;
 863 public:
 864   ShenandoahEvacuateRootsClosure() :
 865           _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 866   }
 867 
 868 private:
 869   template <class T>
 870   void do_oop_work(T* p) {
 871     T o = RawAccess<>::oop_load(p);
 872     if (! CompressedOops::is_null(o)) {
 873       oop obj = CompressedOops::decode_not_null(o);
 874       if (_heap->in_collection_set(obj)) {
 875         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 876         if (oopDesc::unsafe_equals(resolved, obj)) {
 877           _heap->evacuate_object(obj, _thread);
 878         }
 879       }
 880     }
 881   }
 882 
 883 public:
 884   void do_oop(oop* p) {
 885     do_oop_work(p);
 886   }
 887   void do_oop(narrowOop* p) {
 888     do_oop_work(p);
 889   }
 890 };
 891 
 892 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
 893 private:
 894   ShenandoahHeap* const _heap;
 895   Thread* const _thread;
 896 public:
 897   ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 898     _heap(heap), _thread(Thread::current()) {}
 899 
 900   void do_object(oop p) {
 901     shenandoah_assert_marked_complete(NULL, p);
 902     if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
 903       _heap->evacuate_object(p, _thread);
 904     }
 905   }
 906 };
 907 
 908 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
 909 private:
 910   ShenandoahHeap* const _sh;
 911   ShenandoahCollectionSet* const _cs;
 912   ShenandoahSharedFlag _claimed_codecache;
 913 
 914 public:
  ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
                                   ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs)
  {}
 921 
 922   void work(uint worker_id) {
 923 
 924     ShenandoahEvacOOMScope oom_evac_scope;
 925     SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 926 
    // If concurrent code cache evac is enabled, evacuate it here.
    // Note we cannot update the roots here, because we risk non-atomic stores to live
    // nmethods. The update is handled elsewhere.
 930     if (ShenandoahConcurrentEvacCodeRoots && _claimed_codecache.try_set()) {
 931       ShenandoahEvacuateRootsClosure cl;
 932       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 933       CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
 934       CodeCache::blobs_do(&blobs);
 935     }
 936 
 937     ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
 938     ShenandoahHeapRegion* r;
 939     while ((r =_cs->claim_next()) != NULL) {
      log_develop_trace(gc, region)("Thread " UINT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    r->region_number());
 943 
 944       assert(r->has_live(), "all-garbage regions are reclaimed early");
 945       _sh->marked_object_iterate(r, &cl);
 946 
 947       if (_sh->check_cancelled_concgc_and_yield()) {
 948         log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, r->region_number());
 949         break;
 950       }
 951 
 952       if (ShenandoahPacing) {
 953         _sh->pacer()->report_evac(r->get_live_data_words());
 954       }
 955     }
 956   }
 957 };
 958 
 959 void ShenandoahHeap::trash_cset_regions() {
 960   ShenandoahHeapLocker locker(lock());
 961 
 962   ShenandoahCollectionSet* set = collection_set();
 963   ShenandoahHeapRegion* r;
 964   set->clear_current_index();
 965   while ((r = set->next()) != NULL) {
 966     r->make_trash();
 967   }
 968   collection_set()->clear();
 969 }
 970 
 971 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
 972   st->print_cr("Heap Regions:");
 973   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
 974   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
 975   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
 976   st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
 977 
 978   for (size_t i = 0; i < num_regions(); i++) {
 979     get_region(i)->print_on(st);
 980   }
 981 }
 982 
 983 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
 984   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
 985 
 986   oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
 987   size_t size = humongous_obj->size() + BrooksPointer::word_size();
 988   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
 989   size_t index = start->region_number() + required_regions - 1;
 990 
 991   assert(!start->has_live(), "liveness must be zero");
  log_trace(gc, humongous)("Reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
 993 
  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from the tail. Otherwise, the assertion fails when printing the region to the trace log,
    // because it expects every humongous continuation region to follow a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);
 998 
 999     LogTarget(Trace, gc, humongous) lt;
1000     if (lt.is_enabled()) {
1001       ResourceMark rm;
1002       LogStream ls(lt);
1003       region->print_on(&ls);
1004     }
1005 
1006     assert(region->is_humongous(), "expect correct humongous start or continuation");
1007     assert(!in_collection_set(region), "Humongous region should not be in collection set");
1008 
1009     region->make_trash();
1010   }
1011 }
1012 
1013 #ifdef ASSERT
1014 class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
1015   bool heap_region_do(ShenandoahHeapRegion* r) {
1016     assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
1017     return false;
1018   }
1019 };
1020 #endif
1021 
1022 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
1023   log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());
1024 
1025   if (!cancelled_concgc()) {
1026     // Allocations might have happened before we STWed here, record peak:
1027     shenandoahPolicy()->record_peak_occupancy();
1028 
1029     make_tlabs_parsable(true);
1030 
1031     if (ShenandoahVerify) {
1032       verifier()->verify_after_concmark();
1033     }
1034 
1035     trash_cset_regions();
1036 
1037     // NOTE: This needs to be done during a stop the world pause, because
1038     // putting regions into the collection set concurrently with Java threads
1039     // will create a race. In particular, acmp could fail because when we
1040     // resolve the first operand, the containing region might not yet be in
1041     // the collection set, and thus return the original oop. When the 2nd
1042     // operand gets resolved, the region could be in the collection set
1043     // and the oop gets evacuated. If both operands have originally been
1044     // the same, we get false negatives.
1045 
1046     {
1047       ShenandoahHeapLocker locker(lock());
1048       _collection_set->clear();
1049       _free_set->clear();
1050 
1051 #ifdef ASSERT
1052       ShenandoahCheckCollectionSetClosure ccsc;
1053       heap_region_iterate(&ccsc);
1054 #endif
1055 
1056       _shenandoah_policy->choose_collection_set(_collection_set);
1057 
1058       _free_set->rebuild();
1059     }
1060 
1061     Universe::update_heap_info_at_gc();
1062 
1063     if (ShenandoahVerify) {
1064       verifier()->verify_before_evacuation();
1065     }
1066   }
1067 }
1068 
1069 
1070 class ShenandoahRetireTLABClosure : public ThreadClosure {
1071 private:
1072   bool _retire;
1073 
1074 public:
1075   ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}
1076 
1077   void do_thread(Thread* thread) {
1078     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1079     thread->gclab().make_parsable(_retire);
1080   }
1081 };
1082 
1083 void ShenandoahHeap::make_tlabs_parsable(bool retire_tlabs) {
1084   if (UseTLAB) {
1085     CollectedHeap::ensure_parsability(retire_tlabs);
1086     ShenandoahRetireTLABClosure cl(retire_tlabs);
1087     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1088       cl.do_thread(t);
1089     }
1090     gc_threads_do(&cl);
1091   }
1092 }
1093 
1094 
1095 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1096   ShenandoahRootEvacuator* _rp;
1097 public:
1098 
1099   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1100     AbstractGangTask("Shenandoah evacuate and update roots"),
1101     _rp(rp)
1102   {
1103     // Nothing else to do.
1104   }
1105 
1106   void work(uint worker_id) {
1107     ShenandoahEvacOOMScope oom_evac_scope;
1108     ShenandoahEvacuateUpdateRootsClosure cl;
1109 
1110     if (ShenandoahConcurrentEvacCodeRoots) {
1111       _rp->process_evacuate_roots(&cl, NULL, worker_id);
1112     } else {
1113       MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1114       _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1115     }
1116   }
1117 };
1118 
1119 class ShenandoahFixRootsTask : public AbstractGangTask {
1120   ShenandoahRootEvacuator* _rp;
1121 public:
1122 
1123   ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
1124     AbstractGangTask("Shenandoah update roots"),
1125     _rp(rp)
1126   {
1127     // Nothing else to do.
1128   }
1129 
1130   void work(uint worker_id) {
1131     ShenandoahEvacOOMScope oom_evac_scope;
1132     ShenandoahUpdateRefsClosure cl;
1133     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1134 
1135     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1136   }
1137 };
1138 
1139 void ShenandoahHeap::evacuate_and_update_roots() {
1140 
1141 #if defined(COMPILER2) || INCLUDE_JVMCI
1142   DerivedPointerTable::clear();
1143 #endif
1144   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1145 
1146   {
1147     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1148     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1149     workers()->run_task(&roots_task);
1150   }
1151 
1152 #if defined(COMPILER2) || INCLUDE_JVMCI
1153   DerivedPointerTable::update_pointers();
1154 #endif
1155   if (cancelled_concgc()) {
1156     fixup_roots();
1157   }
1158 }
1159 
void ShenandoahHeap::fixup_roots() {
  assert(cancelled_concgc(), "Only after concurrent cycle failed");

  // If initial evacuation has been cancelled, we need to update all references
  // after all workers have finished. Otherwise we might run into the following problem:
  // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space ptr of object X.
  // GC thread 2 evacuates the same object X to to-space,
  // which leaves a truly dangling from-space reference in the first root oop*. This must not happen.
  // clear() and update_pointers() must always be called in pairs. They
  // cannot nest with the clear()/update_pointers() calls above.
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
  ShenandoahFixRootsTask update_roots_task(&rp);
  workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
1180 
1181 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1182   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1183 
1184   CodeBlobToOopClosure blobsCl(cl, false);
1185   CLDToOopClosure cldCl(cl);
1186 
1187   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1188   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
1189 }
1190 
1191 bool ShenandoahHeap::supports_tlab_allocation() const {
1192   return true;
1193 }
1194 
1195 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1196   return MIN2(_free_set->unsafe_peek_free(), max_tlab_size());
1197 }
1198 
1199 size_t ShenandoahHeap::max_tlab_size() const {
1200   return ShenandoahHeapRegion::max_tlab_size_bytes();
1201 }
1202 
1203 class ShenandoahResizeGCLABClosure : public ThreadClosure {
1204 public:
1205   void do_thread(Thread* thread) {
1206     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1207     thread->gclab().resize();
1208   }
1209 };
1210 
1211 void ShenandoahHeap::resize_all_tlabs() {
1212   CollectedHeap::resize_all_tlabs();
1213 
1214   ShenandoahResizeGCLABClosure cl;
1215   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1216     cl.do_thread(t);
1217   }
1218   gc_threads_do(&cl);
1219 }
1220 
1221 class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
1222 public:
1223   void do_thread(Thread* thread) {
1224     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1225     thread->gclab().accumulate_statistics();
1226     thread->gclab().initialize_statistics();
1227   }
1228 };
1229 
1230 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1231   ShenandoahAccumulateStatisticsGCLABClosure cl;
1232   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1233     cl.do_thread(t);
1234   }
1235   gc_threads_do(&cl);
1236 }
1237 
1238 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1239   return true;
1240 }
1241 
1242 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1243   // Overridden to do nothing.
1244   return new_obj;
1245 }
1246 
1247 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1248   return true;
1249 }
1250 
1251 bool ShenandoahHeap::card_mark_must_follow_store() const {
1252   return false;
1253 }
1254 
1255 void ShenandoahHeap::collect(GCCause::Cause cause) {
1256   _concurrent_gc_thread->handle_explicit_gc(cause);
1257 }
1258 
1259 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1260   //assert(false, "Shouldn't need to do full collections");
1261 }
1262 
1263 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1264   Unimplemented();
1265   return NULL;
1266 
1267 }
1268 
1269 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1270   return _shenandoah_policy;
1271 }
1272 
1273 
1274 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1275   Space* sp = heap_region_containing(addr);
1276   if (sp != NULL) {
1277     return sp->block_start(addr);
1278   }
1279   return NULL;
1280 }
1281 
1282 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1283   Space* sp = heap_region_containing(addr);
1284   assert(sp != NULL, "block_size of address outside of heap");
1285   return sp->block_size(addr);
1286 }
1287 
1288 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1289   Space* sp = heap_region_containing(addr);
1290   return sp->block_is_obj(addr);
1291 }
1292 
1293 jlong ShenandoahHeap::millis_since_last_gc() {
1294   return 0;
1295 }
1296 
1297 void ShenandoahHeap::prepare_for_verify() {
1298   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1299     make_tlabs_parsable(false);
1300   }
1301 }
1302 
1303 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1304   workers()->print_worker_threads_on(st);
1305   if (ShenandoahStringDedup::is_enabled()) {
1306     ShenandoahStringDedup::print_worker_threads_on(st);
1307   }
1308 }
1309 
1310 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1311   workers()->threads_do(tcl);
1312   if (ShenandoahStringDedup::is_enabled()) {
1313     ShenandoahStringDedup::threads_do(tcl);
1314   }
1315 }
1316 
1317 void ShenandoahHeap::print_tracing_info() const {
1318   LogTarget(Info, gc, stats) lt;
1319   if (lt.is_enabled()) {
1320     ResourceMark rm;
1321     LogStream ls(lt);
1322 
1323     phase_timings()->print_on(&ls);
1324 
1325     ls.cr();
1326     ls.cr();
1327 
1328     shenandoahPolicy()->print_gc_stats(&ls);
1329 
1330     ls.cr();
1331     ls.cr();
1332 
1333     if (ShenandoahPacing) {
1334       pacer()->print_on(&ls);
1335     }
1336 
1337     ls.cr();
1338     ls.cr();
1339 
1340     if (ShenandoahAllocationTrace) {
1341       assert(alloc_tracker() != NULL, "Must be");
1342       alloc_tracker()->print_on(&ls);
1343     } else {
1344       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1345     }
1346   }
1347 }
1348 
1349 void ShenandoahHeap::verify(VerifyOption vo) {
1350   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1351     if (ShenandoahVerify) {
1352       verifier()->verify_generic(vo);
1353     } else {
1354       // TODO: Consider allocating verification bitmaps on demand,
1355       // and turn this on unconditionally.
1356     }
1357   }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1360   return _free_set->capacity();
1361 }
1362 
1363 class ObjectIterateScanRootClosure : public ExtendedOopClosure {
1364 private:
1365   MarkBitMap* _bitmap;
1366   Stack<oop,mtGC>* _oop_stack;
1367 
1368   template <class T>
1369   void do_oop_work(T* p) {
1370     T o = RawAccess<>::oop_load(p);
1371     if (!CompressedOops::is_null(o)) {
1372       oop obj = CompressedOops::decode_not_null(o);
1373       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1374       assert(oopDesc::is_oop(obj), "must be a valid oop");
1375       if (!_bitmap->isMarked((HeapWord*) obj)) {
1376         _bitmap->mark((HeapWord*) obj);
1377         _oop_stack->push(obj);
1378       }
1379     }
1380   }
1381 public:
1382   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1383     _bitmap(bitmap), _oop_stack(oop_stack) {}
1384   void do_oop(oop* p)       { do_oop_work(p); }
1385   void do_oop(narrowOop* p) { do_oop_work(p); }
1386 };
1387 
1388 /*
1389  * This is public API, used in preparation of object_iterate().
1390  * Since we don't do linear scan of heap in object_iterate() (see comment below), we don't
1391  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1392  * control, we call SH::make_tlabs_parsable().
1393  */
1394 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1395   // No-op.
1396 }
1397 
1398 /*
1399  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1400  *
1401  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1402  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1403  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1404  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1405  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1406  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1407  * wiped the bitmap in preparation for next marking).
1408  *
1409  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1410  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1411  * is allowed to report dead objects, but is not required to do so.
1412  */
1413 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1414   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1415   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1416     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1417     return;
1418   }
1419 
1420   Stack<oop,mtGC> oop_stack;
1421 
1422   // First, we process all GC roots. This populates the work stack with initial objects.
1423   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1424   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1425   CLDToOopClosure clds(&oops, false);
1426   CodeBlobToOopClosure blobs(&oops, false);
1427   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1428 
1429   // Work through the oop stack to traverse heap.
1430   while (! oop_stack.is_empty()) {
1431     oop obj = oop_stack.pop();
1432     assert(oopDesc::is_oop(obj), "must be a valid oop");
1433     cl->do_object(obj);
1434     obj->oop_iterate(&oops);
1435   }
1436 
1437   assert(oop_stack.is_empty(), "should be empty");
1438 
1439   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1440     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1441   }
1442 }
1443 
1444 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1445   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1446   object_iterate(cl);
1447 }
1448 
1449 // Apply blk->heap_region_do() on all regions in address order (optionally skipping cset
1450 // regions and humongous continuations), terminating the iteration early if heap_region_do() returns true.
1451 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1452   for (size_t i = 0; i < num_regions(); i++) {
1453     ShenandoahHeapRegion* current  = get_region(i);
1454     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1455       continue;
1456     }
1457     if (skip_cset_regions && in_collection_set(current)) {
1458       continue;
1459     }
1460     if (blk->heap_region_do(current)) {
1461       return;
1462     }
1463   }
1464 }
1465 
1466 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1467 private:
1468   ShenandoahHeap* sh;
1469 public:
1470   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1471 
1472   bool heap_region_do(ShenandoahHeapRegion* r) {
1473     r->clear_live_data();
1474     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1475     return false;
1476   }
1477 };
1478 
1479 void ShenandoahHeap::op_init_mark() {
1480   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1481 
1482   assert(is_next_bitmap_clear(), "need clear marking bitmap");
1483 
1484   if (ShenandoahVerify) {
1485     verifier()->verify_before_concmark();
1486   }
1487 
1488   {
1489     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1490     accumulate_statistics_all_tlabs();
1491   }
1492 
1493   set_concurrent_mark_in_progress(true);
1494   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1495   if (UseTLAB) {
1496     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1497     make_tlabs_parsable(true);
1498   }
1499 
1500   {
1501     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1502     ShenandoahClearLivenessClosure clc(this);
1503     heap_region_iterate(&clc);
1504   }
1505 
1506   // Make above changes visible to worker threads
1507   OrderAccess::fence();
1508 
1509   concurrentMark()->init_mark_roots();
1510 
1511   if (UseTLAB) {
1512     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1513     resize_all_tlabs();
1514   }
1515 
1516   if (ShenandoahPacing) {
1517     pacer()->setup_for_mark();
1518   }
1519 }
1520 
1521 void ShenandoahHeap::op_mark() {
1522   concurrentMark()->mark_from_roots();
1523 
1524   // Allocations happen during concurrent mark, record peak after the phase:
1525   shenandoahPolicy()->record_peak_occupancy();
1526 }
1527 
1528 void ShenandoahHeap::op_final_mark() {
1529   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1530 
1531   // It is critical that we
1532   // evacuate roots right after finishing marking, so that we don't
1533   // get unmarked objects in the roots.
1534 
1535   if (! cancelled_concgc()) {
1536     concurrentMark()->finish_mark_from_roots();
1537     stop_concurrent_marking();
1538 
1539     {
1540       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1541       prepare_for_concurrent_evacuation();
1542     }
1543 
1544     // If collection set has candidates, start evacuation.
1545     // Otherwise, bypass the rest of the cycle.
1546     if (!collection_set()->is_empty()) {
1547       set_evacuation_in_progress(true);
1548       // From here on, we need to update references.
1549       set_has_forwarded_objects(true);
1550 
1551       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1552       evacuate_and_update_roots();
1553     }
1554 
1555     if (ShenandoahPacing) {
1556       pacer()->setup_for_evac();
1557     }
1558   } else {
1559     concurrentMark()->cancel();
1560     stop_concurrent_marking();
1561 
1562     if (process_references()) {
1563       // Abandon reference processing right away: pre-cleaning must have failed.
1564       ReferenceProcessor *rp = ref_processor();
1565       rp->disable_discovery();
1566       rp->abandon_partial_discovery();
1567       rp->verify_no_references_recorded();
1568     }
1569   }
1570 }
1571 
1572 void ShenandoahHeap::op_final_evac() {
1573   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1574 
1575   set_evacuation_in_progress(false);
1576   if (ShenandoahVerify) {
1577     verifier()->verify_after_evacuation();
1578   }
1579 }
1580 
1581 void ShenandoahHeap::op_evac() {
1582 
1583   LogTarget(Trace, gc, region) lt_region;
1584   LogTarget(Trace, gc, cset) lt_cset;
1585 
1586   if (lt_region.is_enabled()) {
1587     ResourceMark rm;
1588     LogStream ls(lt_region);
1589     ls.print_cr("All available regions:");
1590     print_heap_regions_on(&ls);
1591   }
1592 
1593   if (lt_cset.is_enabled()) {
1594     ResourceMark rm;
1595     LogStream ls(lt_cset);
1596     ls.print_cr("Collection set ("SIZE_FORMAT" regions):", _collection_set->count());
1597     _collection_set->print_on(&ls);
1598 
1599     ls.print_cr("Free set:");
1600     _free_set->print_on(&ls);
1601   }
1602 
1603   ShenandoahParallelEvacuationTask task(this, _collection_set);
1604   workers()->run_task(&task);
1605 
1606   if (lt_cset.is_enabled()) {
1607     ResourceMark rm;
1608     LogStream ls(lt_cset);
1609     ls.print_cr("After evacuation collection set ("SIZE_FORMAT" regions):",
1610                 _collection_set->count());
1611     _collection_set->print_on(&ls);
1612 
1613     ls.print_cr("After evacuation free set:");
1614     _free_set->print_on(&ls);
1615   }
1616 
1617   if (lt_region.is_enabled()) {
1618     ResourceMark rm;
1619     LogStream ls(lt_region);
1620     ls.print_cr("All regions after evacuation:");
1621     print_heap_regions_on(&ls);
1622   }
1623 
1624   // Allocations happen during evacuation, record peak after the phase:
1625   shenandoahPolicy()->record_peak_occupancy();
1626 }
1627 
1628 void ShenandoahHeap::op_updaterefs() {
1629   update_heap_references(true);
1630 
1631   // Allocations happen during update-refs, record peak after the phase:
1632   shenandoahPolicy()->record_peak_occupancy();
1633 }
1634 
1635 void ShenandoahHeap::op_cleanup() {
1636   ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
1637   free_set()->recycle_trash();
1638 
1639   // Allocations happen during cleanup, record peak after the phase:
1640   shenandoahPolicy()->record_peak_occupancy();
1641 }
1642 
1643 void ShenandoahHeap::op_cleanup_bitmaps() {
1644   op_cleanup();
1645 
1646   ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1647   reset_next_mark_bitmap();
1648 
1649   // Allocations happen during bitmap cleanup, record peak after the phase:
1650   shenandoahPolicy()->record_peak_occupancy();
1651 }
1652 
1653 void ShenandoahHeap::op_cleanup_traversal() {
1654 
1655   {
1656     ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1657     reset_next_mark_bitmap_traversal();
1658   }
1659 
1660   op_cleanup();
1661 
1662   // Allocations happen during bitmap cleanup, record peak after the phase:
1663   shenandoahPolicy()->record_peak_occupancy();
1664 }
1665 
1666 void ShenandoahHeap::op_preclean() {
1667   concurrentMark()->preclean_weak_refs();
1668 
1669   // Allocations happen during concurrent preclean, record peak after the phase:
1670   shenandoahPolicy()->record_peak_occupancy();
1671 }
1672 
1673 void ShenandoahHeap::op_init_traversal() {
1674   traversal_gc()->init_traversal_collection();
1675 }
1676 
1677 void ShenandoahHeap::op_traversal() {
1678   traversal_gc()->concurrent_traversal_collection();
1679 }
1680 
1681 void ShenandoahHeap::op_final_traversal() {
1682   traversal_gc()->final_traversal_collection();
1683 }
1684 
1685 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1686   full_gc()->do_it(cause);
1687 }
1688 
1689 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1690   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1691   // GC failure via the cancelled_concgc() flag. So, if we detect the failure after
1692   // some phase, we have to upgrade the Degenerated GC to a Full GC.
1693 
1694   clear_cancelled_concgc();
1695 
1696   size_t used_before = used();
1697 
1698   switch (point) {
1699     case _degenerated_evac:
1700       // Not possible to degenerate from here, upgrade to Full GC right away.
1701       cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1702       op_degenerated_fail();
1703       return;
1704 
1705     // The cases below form a Duff's-device-like structure: they describe the actual GC cycle,
1706     // but enter it at different points, depending on which concurrent phase had
1707     // degenerated.
1708 
1709     case _degenerated_traversal:
1710       {
1711         ShenandoahHeapLocker locker(lock());
1712         collection_set()->clear_current_index();
1713         for (size_t i = 0; i < collection_set()->count(); i++) {
1714           ShenandoahHeapRegion* r = collection_set()->next();
1715           r->make_regular_bypass();
1716         }
1717         collection_set()->clear();
1718       }
1719       op_final_traversal();
1720       op_cleanup_traversal();
1721       return;
1722 
1723     case _degenerated_outside_cycle:
1724       if (shenandoahPolicy()->can_do_traversal_gc()) {
1725         // Not possible to degenerate from here, upgrade to Full GC right away.
1726         cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1727         op_degenerated_fail();
1728         return;
1729       }
1730       op_init_mark();
1731       if (cancelled_concgc()) {
1732         op_degenerated_fail();
1733         return;
1734       }
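           // Fallthrough into the final-mark step.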
1735 
1736     case _degenerated_mark:
1737       op_final_mark();
1738       if (cancelled_concgc()) {
1739         op_degenerated_fail();
1740         return;
1741       }
1742 
1743       op_cleanup();
1744 
1745       // If the heuristics decided we should do the cycle, this flag is set,
1746       // and we can do evacuation. Otherwise, this is the shortcut cycle.
1747       if (is_evacuation_in_progress()) {
1748         op_evac();
1749         if (cancelled_concgc()) {
1750           op_degenerated_fail();
1751           return;
1752         }
1753       }
1754 
1755       // If the heuristics decided we should do the cycle, this flag is set,
1756       // and we need to do update-refs. Otherwise, this is the shortcut cycle.
1757       if (has_forwarded_objects()) {
1758         op_init_updaterefs();
1759         if (cancelled_concgc()) {
1760           op_degenerated_fail();
1761           return;
1762         }
1763       }
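           // Fallthrough into the update-refs completion step.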
1764 
1765     case _degenerated_updaterefs:
1766       if (has_forwarded_objects()) {
1767         op_final_updaterefs();
1768         if (cancelled_concgc()) {
1769           op_degenerated_fail();
1770           return;
1771         }
1772       }
1773 
1774       op_cleanup_bitmaps();
1775       break;
1776 
1777     default:
1778       ShouldNotReachHere();
1779   }
1780 
1781   if (ShenandoahVerify) {
1782     verifier()->verify_after_degenerated();
1783   }
1784 
1785   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1786   // because that probably means the heap is overloaded and/or fragmented.
1787   size_t used_after = used();
1788   size_t difference = (used_before > used_after) ? used_before - used_after : 0;
1789   if (difference < ShenandoahHeapRegion::region_size_bytes()) {
1790     cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1791     op_degenerated_futile();
1792   }
1793 }
1794 
1795 void ShenandoahHeap::op_degenerated_fail() {
1796   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1797   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1798   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1799 }
1800 
1801 void ShenandoahHeap::op_degenerated_futile() {
1802   log_info(gc)("Degenerated GC had not reclaimed enough, upgrading to Full GC");
1803   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1804   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1805 }
1806 
1807 void ShenandoahHeap::swap_mark_bitmaps() {
1808   // Swap bitmaps.
1809   MarkBitMap* tmp1 = _complete_mark_bit_map;
1810   _complete_mark_bit_map = _next_mark_bit_map;
1811   _next_mark_bit_map = tmp1;
1812 
1813   // Swap top-at-mark-start pointers
1814   HeapWord** tmp2 = _complete_top_at_mark_starts;
1815   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1816   _next_top_at_mark_starts = tmp2;
1817 
1818   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1819   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1820   _next_top_at_mark_starts_base = tmp3;
1821 }
1822 
1823 
1824 void ShenandoahHeap::stop_concurrent_marking() {
1825   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
1826   if (! cancelled_concgc()) {
1827     // Marking finished without cancellation: any reference updates that were piggybacked
1828     // on marking are complete, so clear the forwarded-objects flag and swap the bitmaps.
1829     set_has_forwarded_objects(false);
1830     swap_mark_bitmaps();
1831   }
1832   set_concurrent_mark_in_progress(false);
1833 
1834   LogTarget(Trace, gc, region) lt;
1835   if (lt.is_enabled()) {
1836     ResourceMark rm;
1837     LogStream ls(lt);
1838     ls.print_cr("Regions at stopping the concurrent mark:");
1839     print_heap_regions_on(&ls);
1840   }
1841 }
1842 
1843 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1844   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1845     ShenandoahThreadLocalData::set_gc_state(t, state);
1846   }
1847 }
1848 
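     // Flip the given bits in the global gc state and mirror the new raw value into every
     // Java thread's thread-local copy via set_gc_state_all_threads().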
1849 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1850   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1851   _gc_state.set_cond(mask, value);
1852   set_gc_state_all_threads(_gc_state.raw_value());
1853 }
1854 
1855 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1856   set_gc_state_mask(MARKING, in_progress);
1857   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1858 }
1859 
1860 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1861    set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
1862    ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1863 }
1864 
1865 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1866   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1867   set_gc_state_mask(EVACUATION, in_progress);
1868 }
1869 
1870 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1871   // Initialize Brooks pointer for the next object
1872   HeapWord* result = obj + BrooksPointer::word_size();
1873   BrooksPointer::initialize(oop(result));
1874   return result;
1875 }
1876 
1877 uint ShenandoahHeap::oop_extra_words() {
1878   return BrooksPointer::word_size();
1879 }
1880 
1881 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1882   _heap(ShenandoahHeap::heap_no_check()) {
1883 }
1884 
1885 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1886   _heap(ShenandoahHeap::heap_no_check()) {
1887 }
1888 
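     // Liveness check used while the heap may still contain forwarded objects: resolve to the
     // forwardee first, then consult the next marking bitmap.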
1889 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1890   if (CompressedOops::is_null(obj)) {
1891     return false;
1892   }
1893   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1894   shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_mark_in_progress() || _heap->is_concurrent_traversal_in_progress());
1895   return _heap->is_marked_next(obj);
1896 }
1897 
1898 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1899   if (CompressedOops::is_null(obj)) {
1900     return false;
1901   }
1902   shenandoah_assert_not_forwarded(NULL, obj);
1903   return _heap->is_marked_next(obj);
1904 }
1905 
1906 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
1907   return has_forwarded_objects() ?
1908          (BoolObjectClosure*) &_forwarded_is_alive :
1909          (BoolObjectClosure*) &_is_alive;
1910 }
1911 
1912 void ShenandoahHeap::ref_processing_init() {
1913   MemRegion mr = reserved_region();
1914 
1915   _forwarded_is_alive.init(this);
1916   _is_alive.init(this);
1917   assert(_max_workers > 0, "Sanity");
1918 
1919   _ref_processor =
1920     new ReferenceProcessor(mr,    // span
1921                            ParallelRefProcEnabled,  // MT processing
1922                            _max_workers,            // Degree of MT processing
1923                            true,                    // MT discovery
1924                            _max_workers,            // Degree of MT discovery
1925                            false,                   // Reference discovery is not atomic
1926                            NULL);                   // No closure, should be installed before use
1927 
1928   shenandoah_assert_rp_isalive_not_installed();
1929 }
1930 
1931 
1932 GCTracer* ShenandoahHeap::tracer() {
1933   return shenandoahPolicy()->tracer();
1934 }
1935 
1936 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1937   return _free_set->used();
1938 }
1939 
1940 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
1941   if (try_cancel_concgc()) {
1942     FormatBuffer<> msg("Cancelling concurrent GC: %s", GCCause::to_string(cause));
1943     log_info(gc)("%s", msg.buffer());
1944     Events::log(Thread::current(), "%s", msg.buffer());
1945   }
1946 }
1947 
1948 uint ShenandoahHeap::max_workers() {
1949   return _max_workers;
1950 }
1951 
1952 void ShenandoahHeap::stop() {
1953   // The shutdown sequence should be able to terminate when GC is running.
1954 
1955   // Step 0. Notify policy to disable event recording.
1956   _shenandoah_policy->record_shutdown();
1957 
1958   // Step 1. Notify control thread that we are in shutdown.
1959   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1960   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1961   _concurrent_gc_thread->prepare_for_graceful_shutdown();
1962 
1963   // Step 2. Notify GC workers that we are cancelling GC.
1964   cancel_concgc(GCCause::_shenandoah_stop_vm);
1965 
1966   // Step 3. Wait until GC worker exits normally.
1967   _concurrent_gc_thread->stop();
1968 
1969   // Step 4. Stop String Dedup thread if it is active
1970   if (ShenandoahStringDedup::is_enabled()) {
1971     ShenandoahStringDedup::stop();
1972   }
1973 }
1974 
1975 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
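       // Resolve which timing phases to record under: the full-GC variants when invoked from
       // Full GC, the concurrent purge variants otherwise.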
1976   ShenandoahPhaseTimings::Phase phase_root =
1977           full_gc ?
1978           ShenandoahPhaseTimings::full_gc_purge :
1979           ShenandoahPhaseTimings::purge;
1980 
1981   ShenandoahPhaseTimings::Phase phase_unload =
1982           full_gc ?
1983           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1984           ShenandoahPhaseTimings::purge_class_unload;
1985 
1986   ShenandoahPhaseTimings::Phase phase_cldg =
1987           full_gc ?
1988           ShenandoahPhaseTimings::full_gc_purge_cldg :
1989           ShenandoahPhaseTimings::purge_cldg;
1990 
1991   ShenandoahPhaseTimings::Phase phase_par =
1992           full_gc ?
1993           ShenandoahPhaseTimings::full_gc_purge_par :
1994           ShenandoahPhaseTimings::purge_par;
1995 
1996   ShenandoahPhaseTimings::Phase phase_par_classes =
1997           full_gc ?
1998           ShenandoahPhaseTimings::full_gc_purge_par_classes :
1999           ShenandoahPhaseTimings::purge_par_classes;
2000 
2001   ShenandoahPhaseTimings::Phase phase_par_codecache =
2002           full_gc ?
2003           ShenandoahPhaseTimings::full_gc_purge_par_codecache :
2004           ShenandoahPhaseTimings::purge_par_codecache;
2005 
2006   ShenandoahPhaseTimings::Phase phase_par_rmt =
2007           full_gc ?
2008           ShenandoahPhaseTimings::full_gc_purge_par_rmt :
2009           ShenandoahPhaseTimings::purge_par_rmt;
2010 
2011   ShenandoahPhaseTimings::Phase phase_par_symbstring =
2012           full_gc ?
2013           ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
2014           ShenandoahPhaseTimings::purge_par_symbstring;
2015 
2016   ShenandoahPhaseTimings::Phase phase_par_sync =
2017           full_gc ?
2018           ShenandoahPhaseTimings::full_gc_purge_par_sync :
2019           ShenandoahPhaseTimings::purge_par_sync;
2020 
2021   ShenandoahGCPhase root_phase(phase_root);
2022 
2023   BoolObjectClosure* is_alive = is_alive_closure();
2024 
2025   bool purged_class;
2026 
2027   // Unload classes and purge SystemDictionary.
2028   {
2029     ShenandoahGCPhase phase(phase_unload);
2030     purged_class = SystemDictionary::do_unloading(is_alive,
2031                                                   gc_timer(),
2032                                                   false /* defer cleaning */);
2033   }
2034 
2035   {
2036     ShenandoahGCPhase phase(phase_par);
2037     uint active = _workers->active_workers();
2038     ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
2039     _workers->run_task(&unlink_task);
2040 
2041     ShenandoahPhaseTimings* p = phase_timings();
2042     ParallelCleaningTimes times = unlink_task.times();
2043 
2044     // "times" report total time, phase_tables_cc reports wall time. Divide total times
2045     // by active workers to get average time per worker, that would add up to wall time.
2046     p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
2047     p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
2048     p->record_phase_time(phase_par_rmt,        times.rmt_work_us() / active);
2049     p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
2050     p->record_phase_time(phase_par_sync,       times.sync_us() / active);
2051   }
2052 
2053   if (ShenandoahStringDedup::is_enabled()) {
2054     ShenandoahPhaseTimings::Phase phase_par_string_dedup =
2055             full_gc ?
2056             ShenandoahPhaseTimings::full_gc_purge_par_string_dedup :
2057             ShenandoahPhaseTimings::purge_par_string_dedup;
2058     ShenandoahGCPhase phase(phase_par_string_dedup);
2059     ShenandoahStringDedup::parallel_cleanup();
2060   }
2061 
2062 
2063   {
2064     ShenandoahGCPhase phase(phase_cldg);
2065     ClassLoaderDataGraph::purge();
2066   }
2067 }
2068 
2069 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2070   set_gc_state_mask(HAS_FORWARDED, cond);
2071 }
2072 
2073 void ShenandoahHeap::set_process_references(bool pr) {
2074   _process_references.set_cond(pr);
2075 }
2076 
2077 void ShenandoahHeap::set_unload_classes(bool uc) {
2078   _unload_classes.set_cond(uc);
2079 }
2080 
2081 bool ShenandoahHeap::process_references() const {
2082   return _process_references.is_set();
2083 }
2084 
2085 bool ShenandoahHeap::unload_classes() const {
2086   return _unload_classes.is_set();
2087 }
2088 
2089 // FIXME: This should be in ShenandoahHeapRegionSet.
2090 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2091   size_t region_idx = r->region_number() + 1;
2092   ShenandoahHeapRegion* next = get_region(region_idx);
2093   guarantee(next->region_number() == region_idx, "region number must match");
2094   while (next->is_humongous()) {
2095     region_idx = next->region_number() + 1;
2096     next = get_region(region_idx);
2097     guarantee(next->region_number() == region_idx, "region number must match");
2098   }
2099   return next;
2100 }
2101 
2102 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2103   return _monitoring_support;
2104 }
2105 
2106 MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
2107   return _complete_mark_bit_map;
2108 }
2109 
2110 MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
2111   return _next_mark_bit_map;
2112 }
2113 
2114 address ShenandoahHeap::in_cset_fast_test_addr() {
2115   ShenandoahHeap* heap = ShenandoahHeap::heap();
2116   assert(heap->collection_set() != NULL, "Sanity");
2117   return (address) heap->collection_set()->biased_map_address();
2118 }
2119 
2120 address ShenandoahHeap::cancelled_concgc_addr() {
2121   return (address) ShenandoahHeap::heap()->_cancelled_concgc.addr_of();
2122 }
2123 
2124 address ShenandoahHeap::gc_state_addr() {
2125   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2126 }
2127 
2128 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2129   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2130 }
2131 
2132 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2133   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2134 }
2135 
2136 ShenandoahPacer* ShenandoahHeap::pacer() const {
2137   assert (_pacer != NULL, "sanity");
2138   return _pacer;
2139 }
2140 
2141 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2142   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2143   _next_top_at_mark_starts[index] = addr;
2144 }
2145 
2146 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2147   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2148   return _next_top_at_mark_starts[index];
2149 }
2150 
2151 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2152   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2153   _complete_top_at_mark_starts[index] = addr;
2154 }
2155 
2156 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2157   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2158   return _complete_top_at_mark_starts[index];
2159 }
2160 
2161 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2162   _degenerated_gc_in_progress.set_cond(in_progress);
2163 }
2164 
2165 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2166   _full_gc_in_progress.set_cond(in_progress);
2167 }
2168 
2169 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2170   assert (is_full_gc_in_progress(), "should be");
2171   _full_gc_move_in_progress.set_cond(in_progress);
2172 }
2173 
2174 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2175   set_gc_state_mask(UPDATEREFS, in_progress);
2176 }
2177 
2178 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2179   ShenandoahCodeRoots::add_nmethod(nm);
2180 }
2181 
2182 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2183   ShenandoahCodeRoots::remove_nmethod(nm);
2184 }
2185 
2186 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2187   o = BarrierSet::barrier_set()->write_barrier(o);
2188   ShenandoahHeapLocker locker(lock());
2189   heap_region_containing(o)->make_pinned();
2190   return o;
2191 }
2192 
2193 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2194   o = BarrierSet::barrier_set()->read_barrier(o);
2195   ShenandoahHeapLocker locker(lock());
2196   heap_region_containing(o)->make_unpinned();
2197 }
2198 
2199 GCTimer* ShenandoahHeap::gc_timer() const {
2200   return _gc_timer;
2201 }
2202 
2203 #ifdef ASSERT
2204 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2205   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2206 
2207   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2208     if (UseDynamicNumberOfGCThreads ||
2209         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2210       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2211     } else {
2212       // Use ParallelGCThreads inside safepoints
2213       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2214     }
2215   } else {
2216     if (UseDynamicNumberOfGCThreads ||
2217         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2218       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2219     } else {
2220       // Use ConcGCThreads outside safepoints
2221       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2222     }
2223   }
2224 }
2225 #endif
2226 
2227 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
2228   return _connection_matrix;
2229 }
2230 
2231 ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() {
2232   return _traversal_gc;
2233 }
2234 
2235 ShenandoahVerifier* ShenandoahHeap::verifier() {
2236   guarantee(ShenandoahVerify, "Should be enabled");
2237   assert (_verifier != NULL, "sanity");
2238   return _verifier;
2239 }
2240 
2241 template<class T>
2242 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2243 private:
2244   T cl;
2245   ShenandoahHeap* _heap;
2246   ShenandoahRegionIterator _regions;
2247   bool _concurrent;
2248 public:
2249   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator regions, bool concurrent) :
2250     AbstractGangTask("Concurrent Update References Task"),
2251     cl(T()),
2252     _heap(ShenandoahHeap::heap()),
2253     _regions(regions),
2254     _concurrent(concurrent) {
2255   }
2256 
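       // Per-worker loop: collection set regions only get their completed mark bitmap cleared,
       // while other active regions have the references in their marked objects updated (and
       // reported to the pacer when pacing is enabled); stop early if GC has been cancelled.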
2257   void work(uint worker_id) {
2258     SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
2259     ShenandoahHeapRegion* r = _regions.next();
2260     while (r != NULL) {
2261       if (_heap->in_collection_set(r)) {
2262         HeapWord* bottom = r->bottom();
2263         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2264         if (top > bottom) {
2265           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2266         }
2267       } else {
2268         if (r->is_active()) {
2269           _heap->marked_object_oop_safe_iterate(r, &cl);
2270           if (ShenandoahPacing) {
2271             _heap->pacer()->report_updaterefs(r->get_live_data_words());
2272           }
2273         }
2274       }
2275       if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
2276         return;
2277       }
2278       r = _regions.next();
2279     }
2280   }
2281 };
2282 
2283 void ShenandoahHeap::update_heap_references(bool concurrent) {
2284   if (UseShenandoahMatrix) {
2285     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(_update_refs_iterator, concurrent);
2286     workers()->run_task(&task);
2287   } else {
2288     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(_update_refs_iterator, concurrent);
2289     workers()->run_task(&task);
2290   }
2291 }
2292 
2293 void ShenandoahHeap::op_init_updaterefs() {
2294   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2295 
2296   if (ShenandoahVerify) {
2297     verifier()->verify_before_updaterefs();
2298   }
2299 
2300   set_evacuation_in_progress(false);
2301   set_update_refs_in_progress(true);
2302   make_tlabs_parsable(true);
2303   if (UseShenandoahMatrix) {
2304     connection_matrix()->clear_all();
2305   }
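       // Remember the current top of every region as the safe limit for the concurrent
       // update-refs scan that follows.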
2306   for (size_t i = 0; i < num_regions(); i++) {
2307     ShenandoahHeapRegion* r = get_region(i);
2308     r->set_concurrent_iteration_safe_limit(r->top());
2309   }
2310 
2311   // Reset iterator.
2312   _update_refs_iterator = region_iterator();
2313 
2314   if (ShenandoahPacing) {
2315     pacer()->setup_for_updaterefs();
2316   }
2317 }
2318 
2319 void ShenandoahHeap::op_final_updaterefs() {
2320   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2321 
2322   // Check if there is left-over work, and finish it
2323   if (_update_refs_iterator.has_next()) {
2324     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2325 
2326     // Finish updating references where we left off.
2327     clear_cancelled_concgc();
2328     update_heap_references(false);
2329   }
2330 
2331   // Clear cancelled conc GC, if set. On cancellation path, the block before would handle
2332   // everything. On degenerated paths, cancelled gc would not be set anyway.
2333   if (cancelled_concgc()) {
2334     clear_cancelled_concgc();
2335   }
2336   assert(!cancelled_concgc(), "Should have been done right before");
2337 
2338   concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2339 
2340   // Allocations might have happened before we STWed here, record peak:
2341   shenandoahPolicy()->record_peak_occupancy();
2342 
2343   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2344 
2345   trash_cset_regions();
2346   set_has_forwarded_objects(false);
2347 
2348   if (ShenandoahVerify) {
2349     verifier()->verify_after_updaterefs();
2350   }
2351 
2352   {
2353     ShenandoahHeapLocker locker(lock());
2354     _free_set->rebuild();
2355   }
2356 
2357   set_update_refs_in_progress(false);
2358 }
2359 
2360 void ShenandoahHeap::set_alloc_seq_gc_start() {
2361   // Take next number, the start seq number is inclusive
2362   _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::seqnum_current_alloc() + 1;
2363 }
2364 
2365 void ShenandoahHeap::set_alloc_seq_gc_end() {
2366   // Take current number, the end seq number is also inclusive
2367   _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::seqnum_current_alloc();
2368 }
2369 
2370 
2371 #ifdef ASSERT
2372 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2373   _lock.assert_owned_by_current_thread();
2374 }
2375 
2376 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2377   _lock.assert_not_owned_by_current_thread();
2378 }
2379 
2380 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2381   _lock.assert_owned_by_current_thread_or_safepoint();
2382 }
2383 #endif
2384 
2385 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2386   print_on(st);
2387   print_heap_regions_on(st);
2388 }
2389 
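     // A bitmap slice covers _bitmap_regions_per_slice heap regions. The slice has to stay
     // committed while any region it covers is committed; skip_self asks whether that would
     // still hold if the given region were taken out of consideration.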
2390 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2391   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2392 
2393   size_t regions_from = _bitmap_regions_per_slice * slice;
2394   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2395   for (size_t g = regions_from; g < regions_to; g++) {
2396     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2397     if (skip_self && g == r->region_number()) continue;
2398     if (get_region(g)->is_committed()) {
2399       return true;
2400     }
2401   }
2402   return false;
2403 }
2404 
2405 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2406   assert_heaplock_owned_by_current_thread();
2407 
2408   if (is_bitmap_slice_committed(r, true)) {
2409     // Some other region from the group is already committed, meaning the bitmap
2410     // slice is already committed, so we exit right away.
2411     return true;
2412   }
2413 
2414   // Commit the bitmap slice:
2415   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2416   size_t off = _bitmap_bytes_per_slice * slice;
2417   size_t len = _bitmap_bytes_per_slice;
2418   if (!os::commit_memory((char*)_bitmap0_region.start() + off, len, false)) {
2419     return false;
2420   }
2421   if (!os::commit_memory((char*)_bitmap1_region.start() + off, len, false)) {
2422     return false;
2423   }
2424   return true;
2425 }
2426 
2427 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2428   assert_heaplock_owned_by_current_thread();
2429 
2430   if (is_bitmap_slice_committed(r, true)) {
2431     // Some other region from the group is still committed, meaning the bitmap
2432     // slice should stay committed, so we exit right away.
2433     return true;
2434   }
2435 
2436   // Uncommit the bitmap slice:
2437   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2438   size_t off = _bitmap_bytes_per_slice * slice;
2439   size_t len = _bitmap_bytes_per_slice;
2440   if (!os::uncommit_memory((char*)_bitmap0_region.start() + off, len)) {
2441     return false;
2442   }
2443   if (!os::uncommit_memory((char*)_bitmap1_region.start() + off, len)) {
2444     return false;
2445   }
2446   return true;
2447 }
2448 
2449 bool ShenandoahHeap::idle_bitmap_slice(ShenandoahHeapRegion *r) {
2450   assert_heaplock_owned_by_current_thread();
2451   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2452 
2453   if (is_bitmap_slice_committed(r, true)) {
2454     // Some other region from the group is still committed, meaning the bitmap
2455     // slice should stay committed, so we exit right away.
2456     return true;
2457   }
2458 
2459   // Idle the bitmap slice:
2460   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2461   size_t off = _bitmap_bytes_per_slice * slice;
2462   size_t len = _bitmap_bytes_per_slice;
2463   if (!os::idle_memory((char*)_bitmap0_region.start() + off, len)) {
2464     return false;
2465   }
2466   if (!os::idle_memory((char*)_bitmap1_region.start() + off, len)) {
2467     return false;
2468   }
2469   return true;
2470 }
2471 
2472 void ShenandoahHeap::activate_bitmap_slice(ShenandoahHeapRegion* r) {
2473   assert_heaplock_owned_by_current_thread();
2474   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2475   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2476   size_t off = _bitmap_bytes_per_slice * slice;
2477   size_t len = _bitmap_bytes_per_slice;
2478   os::activate_memory((char*)_bitmap0_region.start() + off, len);
2479   os::activate_memory((char*)_bitmap1_region.start() + off, len);
2480 }
2481 
2482 void ShenandoahHeap::safepoint_synchronize_begin() {
2483   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2484     SuspendibleThreadSet::synchronize();
2485   }
2486 }
2487 
2488 void ShenandoahHeap::safepoint_synchronize_end() {
2489   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2490     SuspendibleThreadSet::desynchronize();
2491   }
2492 }
2493 
2494 void ShenandoahHeap::vmop_entry_init_mark() {
2495   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2496   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2497   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2498 
2499   try_inject_alloc_failure();
2500   VM_ShenandoahInitMark op;
2501   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2502 }
2503 
2504 void ShenandoahHeap::vmop_entry_final_mark() {
2505   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2506   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2507   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2508 
2509   try_inject_alloc_failure();
2510   VM_ShenandoahFinalMarkStartEvac op;
2511   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2512 }
2513 
2514 void ShenandoahHeap::vmop_entry_final_evac() {
2515   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2516   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2517   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2518 
2519   VM_ShenandoahFinalEvac op;
2520   VMThread::execute(&op); // jump to entry_final_evac under safepoint
2521 }
2522 
2523 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2524   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2525   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2526   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2527 
2528   try_inject_alloc_failure();
2529   VM_ShenandoahInitUpdateRefs op;
2530   VMThread::execute(&op);
2531 }
2532 
2533 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2534   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2535   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2536   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2537 
2538   try_inject_alloc_failure();
2539   VM_ShenandoahFinalUpdateRefs op;
2540   VMThread::execute(&op);
2541 }
2542 
2543 void ShenandoahHeap::vmop_entry_init_traversal() {
2544   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2545   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2546   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2547 
2548   try_inject_alloc_failure();
2549   VM_ShenandoahInitTraversalGC op;
2550   VMThread::execute(&op);
2551 }
2552 
2553 void ShenandoahHeap::vmop_entry_final_traversal() {
2554   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2555   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2556   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2557 
2558   try_inject_alloc_failure();
2559   VM_ShenandoahFinalTraversalGC op;
2560   VMThread::execute(&op);
2561 }
2562 
2563 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2564   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2565   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2566   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2567 
2568   try_inject_alloc_failure();
2569   VM_ShenandoahFullGC op(cause);
2570   VMThread::execute(&op);
2571 }
2572 
2573 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2574   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2575   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2576   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2577 
2578   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2579   VMThread::execute(&degenerated_gc);
2580 }
2581 
2582 void ShenandoahHeap::entry_init_mark() {
2583   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2584   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2585 
2586   FormatBuffer<> msg("Pause Init Mark%s%s%s",
2587                      has_forwarded_objects() ? " (update refs)"    : "",
2588                      process_references() ?    " (process refs)"   : "",
2589                      unload_classes() ?        " (unload classes)" : "");
2590   GCTraceTime(Info, gc) time(msg, gc_timer());
2591   EventMark em("%s", msg.buffer());
2592 
2593   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_init_marking());
2594 
2595   op_init_mark();
2596 }
2597 
2598 void ShenandoahHeap::entry_final_mark() {
2599   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2600   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2601 
2602   FormatBuffer<> msg("Pause Final Mark%s%s%s",
2603                      has_forwarded_objects() ? " (update refs)"    : "",
2604                      process_references() ?    " (process refs)"   : "",
2605                      unload_classes() ?        " (unload classes)" : "");
2606   GCTraceTime(Info, gc) time(msg, gc_timer());
2607   EventMark em("%s", msg.buffer());
2608 
2609   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_marking());
2610 
2611   op_final_mark();
2612 }
2613 
2614 void ShenandoahHeap::entry_final_evac() {
2615   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2616   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2617 
2618   FormatBuffer<> msg("Pause Final Evac");
2619   GCTraceTime(Info, gc) time(msg, gc_timer());
2620   EventMark em("%s", msg.buffer());
2621 
2622   op_final_evac();
2623 }
2624 
2625 void ShenandoahHeap::entry_init_updaterefs() {
2626   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2627   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2628 
2629   static const char* msg = "Pause Init Update Refs";
2630   GCTraceTime(Info, gc) time(msg, gc_timer());
2631   EventMark em("%s", msg);
2632 
2633   // No workers used in this phase, no setup required
2634 
2635   op_init_updaterefs();
2636 }
2637 
2638 void ShenandoahHeap::entry_final_updaterefs() {
2639   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2640   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2641 
2642   static const char* msg = "Pause Final Update Refs";
2643   GCTraceTime(Info, gc) time(msg, gc_timer());
2644   EventMark em("%s", msg);
2645 
2646   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_update_ref());
2647 
2648   op_final_updaterefs();
2649 }
2650 
2651 void ShenandoahHeap::entry_init_traversal() {
2652   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2653   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2654 
2655   static const char* msg = "Pause Init Traversal";
2656   GCTraceTime(Info, gc) time(msg, gc_timer());
2657   EventMark em("%s", msg);
2658 
2659   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2660 
2661   op_init_traversal();
2662 }
2663 
2664 void ShenandoahHeap::entry_final_traversal() {
2665   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2666   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2667 
2668   static const char* msg = "Pause Final Traversal";
2669   GCTraceTime(Info, gc) time(msg, gc_timer());
2670   EventMark em("%s", msg);
2671 
2672   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2673 
2674   op_final_traversal();
2675 }
2676 
2677 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2678   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2679   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2680 
2681   static const char* msg = "Pause Full";
2682   GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2683   EventMark em("%s", msg);
2684 
2685   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_fullgc());
2686 
2687   op_full(cause);
2688 }
2689 
2690 void ShenandoahHeap::entry_degenerated(int point) {
2691   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2692   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2693 
2694   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2695   FormatBuffer<> msg("Pause Degenerated GC (%s)", degen_point_to_string(dpoint));
2696   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2697   EventMark em("%s", msg.buffer());
2698 
2699   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated());
2700 
2701   set_degenerated_gc_in_progress(true);
2702   op_degenerated(dpoint);
2703   set_degenerated_gc_in_progress(false);
2704 }
2705 
2706 void ShenandoahHeap::entry_mark() {
2707   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2708 
2709   FormatBuffer<> msg("Concurrent marking%s%s%s",
2710                      has_forwarded_objects() ? " (update refs)"    : "",
2711                      process_references() ?    " (process refs)"   : "",
2712                      unload_classes() ?        " (unload classes)" : "");
2713   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2714   EventMark em("%s", msg.buffer());
2715 
2716   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_marking());
2717 
2718   try_inject_alloc_failure();
2719   op_mark();
2720 }
2721 
2722 void ShenandoahHeap::entry_evac() {
2723   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2724   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2725 
2726   static const char* msg = "Concurrent evacuation";
2727   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2728   EventMark em("%s", msg);
2729 
2730   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_evac());
2731 
2732   try_inject_alloc_failure();
2733   op_evac();
2734 }
2735 
2736 void ShenandoahHeap::entry_updaterefs() {
2737   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2738 
2739   static const char* msg = "Concurrent update references";
2740   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2741   EventMark em("%s", msg);
2742 
2743   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref());
2744 
2745   try_inject_alloc_failure();
2746   op_updaterefs();
2747 }
2748 void ShenandoahHeap::entry_cleanup() {
2749   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2750 
2751   static const char* msg = "Concurrent cleanup";
2752   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2753   EventMark em("%s", msg);
2754 
2755   // This phase does not use workers, no need for setup
2756 
2757   try_inject_alloc_failure();
2758   op_cleanup();
2759 }
2760 
2761 void ShenandoahHeap::entry_cleanup_traversal() {
2762   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2763 
2764   static const char* msg = "Concurrent cleanup";
2765   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2766   EventMark em("%s", msg);
2767 
2768   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2769 
2770   try_inject_alloc_failure();
2771   op_cleanup_traversal();
2772 }
2773 
2774 void ShenandoahHeap::entry_cleanup_bitmaps() {
2775   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2776 
2777   static const char* msg = "Concurrent cleanup";
2778   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2779   EventMark em("%s", msg);
2780 
2781   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup());
2782 
2783   try_inject_alloc_failure();
2784   op_cleanup_bitmaps();
2785 }
2786 
2787 void ShenandoahHeap::entry_preclean() {
2788   if (ShenandoahPreclean && process_references()) {
2789     static const char* msg = "Concurrent precleaning";
2790     GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2791     EventMark em("%s", msg);
2792 
2793     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2794 
2795     ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_preclean());
2796 
2797     try_inject_alloc_failure();
2798     op_preclean();
2799   }
2800 }
2801 
2802 void ShenandoahHeap::entry_traversal() {
2803   static const char* msg = "Concurrent traversal";
2804   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2805   EventMark em("%s", msg);
2806 
2807   TraceCollectorStats tcs(is_minor_gc() ? monitoring_support()->partial_collection_counters()
2808                                         : monitoring_support()->concurrent_collection_counters());
2809 
2810   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2811 
2812   try_inject_alloc_failure();
2813   op_traversal();
2814 }
2815 
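     // Diagnostic support: with ShenandoahAllocFailureALot, occasionally (roughly 5% of calls)
     // set the injected-failure flag that should_inject_alloc_failure() consumes on the
     // allocation path, then sleep briefly and report if that cancelled the GC.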
2816 void ShenandoahHeap::try_inject_alloc_failure() {
2817   if (ShenandoahAllocFailureALot && !cancelled_concgc() && ((os::random() % 1000) > 950)) {
2818     _inject_alloc_failure.set();
2819     os::naked_short_sleep(1);
2820     if (cancelled_concgc()) {
2821       log_info(gc)("Allocation failure was successfully injected");
2822     }
2823   }
2824 }
2825 
2826 bool ShenandoahHeap::should_inject_alloc_failure() {
2827   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2828 }
2829 
2830 void ShenandoahHeap::initialize_serviceability() {
2831   _memory_pool = new ShenandoahMemoryPool(this);
2832   _cycle_memory_manager.add_pool(_memory_pool);
2833   _stw_memory_manager.add_pool(_memory_pool);
2834 }
2835 
2836 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2837   GrowableArray<GCMemoryManager*> memory_managers(2);
2838   memory_managers.append(&_cycle_memory_manager);
2839   memory_managers.append(&_stw_memory_manager);
2840   return memory_managers;
2841 }
2842 
2843 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2844   GrowableArray<MemoryPool*> memory_pools(1);
2845   memory_pools.append(_memory_pool);
2846   return memory_pools;
2847 }
2848 
2849 void ShenandoahHeap::enter_evacuation() {
2850   _oom_evac_handler.enter_evacuation();
2851 }
2852 
2853 void ShenandoahHeap::leave_evacuation() {
2854   _oom_evac_handler.leave_evacuation();
2855 }
2856 
2857 SoftRefPolicy* ShenandoahHeap::soft_ref_policy() {
2858   return &_soft_ref_policy;
2859 }
2860 
2861 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2862   _index(0),
2863   _heap(ShenandoahHeap::heap()) {}
2864 
2865 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2866   _index(0),
2867   _heap(heap) {}
2868 
2869 bool ShenandoahRegionIterator::has_next() const {
2870   return _index < _heap->num_regions();
2871 }
2872 
2873 ShenandoahRegionIterator ShenandoahHeap::region_iterator() const {
2874   return ShenandoahRegionIterator();
2875 }
2876 
2877 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure& cl) const {
2878   ShenandoahRegionIterator regions = region_iterator();
2879   ShenandoahHeapRegion* r = regions.next();
2880   while (r != NULL) {
2881     if (cl.heap_region_do(r)) {
2882       break;
2883     }
2884     r = regions.next();
2885   }
2886 }
2887 
2888 bool ShenandoahHeap::is_minor_gc() const {
2889   return _gc_cycle_mode.get() == MINOR;
2890 }
2891 
2892 bool ShenandoahHeap::is_major_gc() const {
2893   return _gc_cycle_mode.get() == MAJOR;
2894 }
2895 
2896 void ShenandoahHeap::set_cycle_mode(GCCycleMode gc_cycle_mode) {
2897   _gc_cycle_mode.set(gc_cycle_mode);
2898 }
2899 
2900 char ShenandoahHeap::gc_state() const {
2901   return _gc_state.raw_value();
2902 }