/*
 * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahAllocTracker.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"

#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"
ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  T o = oopDesc::load_heap_oop(p);
  if (! oopDesc::is_null(o)) {
    oop obj = oopDesc::decode_heap_oop_not_null(o);
    shenandoah_assert_not_forwarded(p, obj);
  }
}

void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch",
                     Universe::is_fully_initialized() ? GCId::current_raw() :
                                                        // During VM initialization there is
                                                        // no GC cycle that this task can be
                                                        // associated with.
                                                        GCId::undefined()),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

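      // The mark bitmap covers the heap linearly, scaled down by
      // MarkBitMap::heap_map_factor(), so each region maps to a fixed-size
      // chunk of the bitmap at an offset derived from its region number.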
      size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions.next();
    }
  }
};

jint ShenandoahHeap::initialize() {
  CollectedHeap::pre_initialize();

  BrooksPointer::initial_checks();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  if (ShenandoahAlwaysPreTouch) {
    // Enabled pre-touch means the entire heap is committed right away.
    init_byte_size = max_byte_size;
  }

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 heap_alignment);
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);

  _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
  _committed = _initial_size;

  log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
  if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
    vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
  }

  size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());

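  // The top-at-mark-start arrays are biased: the base pointer is offset down by
  // (heap base >> region shift), so a heap address can index the array directly
  // via (addr >> region_size_bytes_shift()) without subtracting the heap base first.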
  _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());

  _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
                                                         (HeapWord*) pgc_rs.base() + reg_size_words * i,
                                                         reg_size_words,
                                                         i,
                                                         i < num_committed_regions);

      _complete_top_at_mark_starts_base[i] = r->bottom();
      _next_top_at_mark_starts_base[i] = r->bottom();
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    _free_set->rebuild();
  }

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));

  // The call below uses SATB machinery that currently lives in G1,
  // but probably belongs in a shared location.
  JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /* G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

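  // A "slice" is the smallest page-aligned unit of the bitmap that we commit and
  // uncommit. If one page covers more than one region's worth of bitmap, several
  // regions share a slice; otherwise each region's bitmap chunk is its own slice.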
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

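  // Commit only the part of each bitmap that covers the initially committed
  // regions, rounded up to whole slices and clamped to the full bitmap size.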
  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
                            "couldn't allocate initial bitmap");
  os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
                            "couldn't allocate initial bitmap");

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before the initialize() calls below zero it from the initializing thread. For any given
    // region, we touch the region and the corresponding bitmaps from the same thread.

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _num_regions, page_size);
    ShenandoahPretouchTask cl(bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
  _next_mark_bit_map = &_mark_bit_map1;

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  if (UseShenandoahMatrix) {
    _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
  } else {
    _connection_matrix = NULL;
  }

  _traversal_gc = _shenandoah_policy->can_do_traversal_gc() ?
                new ShenandoahTraversalGC(this, _num_regions) :
                NULL;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _phase_timings = new ShenandoahPhaseTimings();

  if (ShenandoahAllocationTrace) {
    _alloc_tracker = new ShenandoahAllocTracker();
  }

  ShenandoahStringDedup::initialize();

  _control_thread = new ShenandoahControlThread();

  ShenandoahCodeRoots::initialize();

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_trace(gc, region)("All Regions");
    print_heap_regions_on(&ls);
    log_trace(gc, region)("Free Regions");
    _free_set->print_on(&ls);
  }

  log_info(gc, init)("Safepointing mechanism: %s",
                     SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
                     (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));

  return JNI_OK;
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _regions(NULL),
  _free_set(NULL),
  _collection_set(NULL),
  _update_refs_iterator(this),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _aux_bit_map(),
  _connection_matrix(NULL),
  _verifier(NULL),
  _pacer(NULL),
  _used_at_last_gc(0),
  _alloc_seq_at_last_gc_start(0),
  _alloc_seq_at_last_gc_end(0),
  _safepoint_workers(NULL),
  _gc_cycle_mode(),
#ifdef ASSERT
  _heap_expansion_count(0),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL),
  _alloc_tracker(NULL),
  _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
  _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
  _memory_pool(NULL)
{
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _full_gc = new ShenandoahMarkCompact();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */ true,
                            /* are_ConcurrentGC_threads */ false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelSafepointCleanupThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelSafepointCleanupThreads,
                                                /* are_GC_task_threads */ false,
                                                /* are_ConcurrentGC_threads */ false);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetNextBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        HeapWord* bottom = region->bottom();
        HeapWord* top = heap->next_top_at_mark_start(region->bottom());
        if (top > bottom) {
          heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
        assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());

  ShenandoahResetNextBitmapTask task;
  _workers->run_task(&task);
}

class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetNextBitmapTraversalTask() :
    AbstractGangTask("Parallel Reset Bitmap Task for Traversal") {}

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegionSet* traversal_set = heap->traversal_gc()->traversal_set();
    ShenandoahHeapRegion* region = _regions.next();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        if (traversal_set->is_in(region) && !region->is_trash()) {
          ShenandoahHeapLocker locker(heap->lock());
          HeapWord* bottom = region->bottom();
          HeapWord* top = heap->next_top_at_mark_start(bottom);
          assert(top <= region->top(),
                 "TAMS must be smaller than or equal to top: TAMS: " PTR_FORMAT ", top: " PTR_FORMAT,
                 p2i(top), p2i(region->top()));
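          // Promote the traversal marks: copy them into the "complete" bitmap and
          // TAMS, then wipe the "next" bitmap and reset next-TAMS for future marking.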
          if (top > bottom) {
            heap->complete_mark_bit_map()->copy_from(heap->next_mark_bit_map(), MemRegion(bottom, top));
            heap->set_complete_top_at_mark_start(bottom, top);
            heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
            heap->set_next_top_at_mark_start(bottom, bottom);
          }
        }
        assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()),
               "next bitmap must be clear");
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap_traversal() {
  assert_gc_workers(_workers->active_workers());

  ShenandoahResetNextBitmapTraversalTask task;
  _workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  for (size_t idx = 0; idx < _num_regions; idx++) {
    ShenandoahHeapRegion* r = get_region(idx);
    if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
      return false;
    }
  }
  return true;
}

bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
               capacity() / K, committed() / K, used() / K);
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "K regions",
               num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);

  st->print("Status: ");
  if (has_forwarded_objects())               st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())      st->print("marking, ");
  if (is_evacuation_in_progress())           st->print("evacuating, ");
  if (is_update_refs_in_progress())          st->print("updating refs, ");
  if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
  if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
  if (is_full_gc_in_progress())              st->print("full gc, ");
  if (is_full_gc_move_in_progress())         st->print("full gc move, ");

  if (cancelled_concgc()) {
    st->print("conc gc cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  if (UseShenandoahMatrix) {
    st->print_cr("Matrix:");

    ShenandoahConnectionMatrix* matrix = connection_matrix();
    if (matrix != NULL) {
      st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
      st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
      st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
    } else {
      st->print_cr(" No matrix.");
    }
  }

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    thread->gclab().initialize(true);
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  if (UseTLAB) {
    MutexLocker ml(Threads_lock);

    ShenandoahInitGCLABClosure init_gclabs;
    Threads::java_threads_do(&init_gclabs);
    gc_threads_do(&init_gclabs);

    // GCLABs cannot be initialized early during VM startup, because max_size
    // cannot be determined yet. Instead, let the WorkGang initialize the GCLAB
    // when a new worker is created.
    _workers->set_initialize_gclab();
  }

  _scm->initialize(_max_workers);
  _full_gc->initialize(_gc_timer);

  ref_processing_init();

  _shenandoah_policy->post_heap_initialize();
}

size_t ShenandoahHeap::used() const {
  return OrderAccess::load_acquire(&_used);
}

size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  OrderAccess::release_store_fence(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(bytes, &_used);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(bytes, &_bytes_allocated_since_gc_start);
}

void ShenandoahHeap::notify_alloc(size_t words, bool waste) {
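  // Wasted words (e.g. retired LAB remainders) do not count as used, but they
  // still count as allocated and as allocation pressure for the pacer.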
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return num_regions() * ShenandoahHeapRegion::region_size_bytes();
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(oop p) {
  return true;
}

void ShenandoahHeap::handle_heap_shrinkage(double shrink_before) {
  if (!ShenandoahUncommit) {
    return;
  }

  ShenandoahHeapLocker locker(lock());

  size_t count = 0;
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      r->make_uncommitted();
      count++;
    }
  }

  if (count > 0) {
    log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
                 count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
    _control_thread->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain the GCLAB and allocate the object in shared space if
  // the amount free in the GCLAB is too large to discard.
  if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
    thread->gclab().record_slow_allocation(size);
    return NULL;
  }

  // Discard gclab and allocate a new one.
  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  size_t new_gclab_size = thread->gclab().compute_size(size);

  thread->gclab().clear_before_allocation();

  if (new_gclab_size == 0) {
    return NULL;
  }

  // Allocate a new GCLAB...
  HeapWord* obj = allocate_new_gclab(new_gclab_size);
  if (obj == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(obj, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  thread->gclab().fill(obj, obj + size, new_gclab_size);
  return obj;
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_tlab);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_gclab);
}

HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
  HeapWord* result = allocate_memory(word_size, type);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in collection set");

    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, AllocType type) {
  ShenandoahAllocTrace trace_alloc(word_size, type);

  bool in_new_region = false;
  HeapWord* result = NULL;

  if (type == _alloc_tlab || type == _alloc_shared) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(word_size);
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(word_size, type, in_new_region);
    }

    // Allocation failed, try full-GC, then retry allocation.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after full-GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy would be to try full-GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
    // TODO: Poll if Full GC made enough progress to warrant retry.
    int tries = 0;
    while ((result == NULL) && (tries++ < ShenandoahAllocGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
      control_thread()->handle_alloc_failure(word_size);
      result = allocate_memory_under_lock(word_size, type, in_new_region);
    }
  } else {
    assert(type == _alloc_gclab || type == _alloc_shared_gc, "Can only accept these types here");
    result = allocate_memory_under_lock(word_size, type, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
                               word_size, p2i(result), Thread::current()->osthread()->thread_id());

  if (result != NULL) {
    notify_alloc(word_size, false);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, AllocType type, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(word_size, type, in_new_region);
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _alloc_shared);
  if (filler != NULL) {
    // Compute the object start only after the allocation is known to have
    // succeeded: offsetting a NULL pointer is undefined behavior.
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    return NULL;
  }
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        shenandoah_assert_marked_complete(p, obj);
        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
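        // If the object still resolves to itself, it has not been evacuated yet;
        // evacuate_object() either copies it, or returns the copy installed by
        // a racing evacuating thread.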
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        oopDesc::encode_store_heap_oop(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (! oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      if (_heap->in_collection_set(obj)) {
        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          _heap->evacuate_object(obj, _thread);
        }
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked_complete(NULL, p);
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  ShenandoahSharedFlag _claimed_codecache;

public:
  ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
                                   ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs)
  {}

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);

    // If concurrent code cache evac is enabled, evacuate it here.
    // Note we cannot update the roots here, because we risk non-atomic stores to the alive
    // nmethods. The update would be handled elsewhere.
    if (ShenandoahConcurrentEvacCodeRoots && _claimed_codecache.try_set()) {
      ShenandoahEvacuateRootsClosure cl;
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
      CodeCache::blobs_do(&blobs);
    }

    ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      log_develop_trace(gc, region)("Thread " INT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    r->region_number());

      assert(r->has_live(), "all-garbage regions are reclaimed early");
      _sh->marked_object_iterate(r, &cl);

      if (_sh->check_cancelled_concgc_and_yield()) {
        log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, r->region_number());
        break;
      }

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->get_live_data_words());
      }
    }
  }
};

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
  st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->region_number() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");
  log_trace(gc, humongous)("Reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, the assertion fails when printing a region to the trace log,
    // as it expects that every region belongs to a humongous region starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    LogTarget(Trace, gc, humongous) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      region->print_on(&ls);
    }

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!in_collection_set(region), "Humongous region should not be in collection set");

    region->make_trash();
  }
}

#ifdef ASSERT
class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool heap_region_do(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_concgc()) {
    // Allocations might have happened before we STWed here, record peak:
    shenandoahPolicy()->record_peak_occupancy();

    make_tlabs_parsable(true);

    if (ShenandoahVerify) {
      verifier()->verify_after_concmark();
    }

    trash_cset_regions();

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.
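    //
    // Example (hypothetical): with a == b, both referring to the same from-space
    // object X, resolving a before X's region enters the collection set yields
    // from-space X, while resolving b afterwards yields the to-space copy;
    // (a == b) would then wrongly evaluate to false.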

    {
      ShenandoahHeapLocker locker(lock());
      _collection_set->clear();
      _free_set->clear();

#ifdef ASSERT
      ShenandoahCheckCollectionSetClosure ccsc;
      heap_region_iterate(&ccsc);
#endif

      _shenandoah_policy->choose_collection_set(_collection_set);

      _free_set->rebuild();
    }

    Universe::update_heap_info_at_gc();

    if (ShenandoahVerify) {
      verifier()->verify_before_evacuation();
    }
  }
}

class ShenandoahRetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}

  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().make_parsable(_retire);
  }
};

void ShenandoahHeap::make_tlabs_parsable(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
    ShenandoahRetireTLABClosure cl(retire_tlabs);
    Threads::java_threads_do(&cl);
    gc_threads_do(&cl);
  }
}

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahEvacuateUpdateRootsClosure cl;

    if (ShenandoahConcurrentEvacCodeRoots) {
      _rp->process_evacuate_roots(&cl, NULL, worker_id);
    } else {
      MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
      _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
    }
  }
};

class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:
  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
  if (cancelled_concgc()) {
    fixup_roots();
  }
}

void ShenandoahHeap::fixup_roots() {
  assert(cancelled_concgc(), "Only after concurrent cycle failed");

  // If initial evacuation has been cancelled, we need to update all references
  // after all workers have finished. Otherwise we might run into the following problem:
  // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
  // pointer to object X in a root, while GC thread 2 evacuates the same object X to
  // to-space. That leaves a truly dangling from-space reference in the first root oop*.
  // This must not happen.
  // clear() and update_pointers() must always be called in pairs,
  // cannot nest with above clear()/update_pointers().
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
  ShenandoahFixRootsTask update_roots_task(&rp);
  workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  return MIN2(_free_set->unsafe_peek_free(), max_tlab_size());
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::max_tlab_size_bytes();
}

class ShenandoahResizeGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().resize();
  }
};

void ShenandoahHeap::resize_all_tlabs() {
  CollectedHeap::resize_all_tlabs();

  ShenandoahResizeGCLABClosure cl;
  Threads::java_threads_do(&cl);
  gc_threads_do(&cl);
}

class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
    thread->gclab().accumulate_statistics();
    thread->gclab().initialize_statistics();
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  ShenandoahAccumulateStatisticsGCLABClosure cl;
  Threads::java_threads_do(&cl);
  gc_threads_do(&cl);
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  _control_thread->handle_explicit_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    make_tlabs_parsable(false);
  }
}

void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
  workers()->print_worker_threads_on(st);
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::print_worker_threads_on(st);
  }
}

void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
  workers()->threads_do(tcl);
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahStringDedup::threads_do(tcl);
  }
}

void ShenandoahHeap::print_tracing_info() const {
  LogTarget(Info, gc, stats) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);

    phase_timings()->print_on(&ls);

    ls.cr();
    ls.cr();

    shenandoahPolicy()->print_gc_stats(&ls);

    ls.cr();
    ls.cr();

    if (ShenandoahPacing) {
      pacer()->print_on(&ls);
    }

    ls.cr();
    ls.cr();

    if (ShenandoahAllocationTrace) {
      assert(alloc_tracker() != NULL, "Must be");
      alloc_tracker()->print_on(&ls);
    } else {
      ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
    }
  }
}

void ShenandoahHeap::verify(VerifyOption vo) {
  if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
    if (ShenandoahVerify) {
      verifier()->verify_generic(vo);
    } else {
      // TODO: Consider allocating verification bitmaps on demand,
      // and turn this on unconditionally.
    }
  }
}

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
  return _free_set->capacity();
}

class ObjectIterateScanRootClosure : public ExtendedOopClosure {
private:
  MarkBitMap* _bitmap;
  Stack<oop,mtGC>* _oop_stack;

  template <class T>
  void do_oop_work(T* p) {
    T o = oopDesc::load_heap_oop(p);
    if (!oopDesc::is_null(o)) {
      oop obj = oopDesc::decode_heap_oop_not_null(o);
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      assert(oopDesc::is_oop(obj), "must be a valid oop");
      if (!_bitmap->isMarked((HeapWord*) obj)) {
        _bitmap->mark((HeapWord*) obj);
        _oop_stack->push(obj);
      }
    }
  }
public:
  ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
    _bitmap(bitmap), _oop_stack(oop_stack) {}
  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below),
 * we don't need to make the heap parsable. For the Shenandoah-internal linear heap
 * scans that we can control, we call SH::make_tlabs_parsable().
 */
void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
  // No-op.
}

1398 /*
1399  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1400  *
1401  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1402  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1403  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1404  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1405  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1406  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1407  * wiped the bitmap in preparation for next marking).
1408  *
1409  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1410  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1411  * is allowed to report dead objects, but is not required to do so.
1412  */
1413 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1414   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1415   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1416     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1417     return;
1418   }
1419 
1420   Stack<oop,mtGC> oop_stack;
1421 
1422   // First, we process all GC roots. This populates the work stack with initial objects.
1423   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1424   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1425   CLDToOopClosure clds(&oops, false);
1426   CodeBlobToOopClosure blobs(&oops, false);
1427   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1428 
1429   // Work through the oop stack to traverse heap.
1430   while (! oop_stack.is_empty()) {
1431     oop obj = oop_stack.pop();
1432     assert(oopDesc::is_oop(obj), "must be a valid oop");
1433     cl->do_object(obj);
1434     obj->oop_iterate(&oops);
1435   }
1436 
1437   assert(oop_stack.is_empty(), "should be empty");
1438 
1439   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1440     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1441   }
1442 }
1443 
1444 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1445   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1446   object_iterate(cl);
1447 }
1448 
// Apply blk->heap_region_do() on all regions in address order, optionally skipping
// collection set regions and humongous continuations, and terminating the iteration
// early if heap_region_do() returns true.
1451 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1452   for (size_t i = 0; i < num_regions(); i++) {
1453     ShenandoahHeapRegion* current  = get_region(i);
1454     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1455       continue;
1456     }
1457     if (skip_cset_regions && in_collection_set(current)) {
1458       continue;
1459     }
1460     if (blk->heap_region_do(current)) {
1461       return;
1462     }
1463   }
1464 }
1465 
1466 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1467 private:
1468   ShenandoahHeap* sh;
1469 public:
1470   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1471 
1472   bool heap_region_do(ShenandoahHeapRegion* r) {
1473     r->clear_live_data();
1474     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1475     return false;
1476   }
1477 };
1478 
1479 void ShenandoahHeap::op_init_mark() {
1480   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1481 
1482   assert(is_next_bitmap_clear(), "need clear marking bitmap");
1483 
1484   if (ShenandoahVerify) {
1485     verifier()->verify_before_concmark();
1486   }
1487 
1488   {
1489     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1490     accumulate_statistics_all_tlabs();
1491   }
1492 
1493   set_concurrent_mark_in_progress(true);
1494   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1495   if (UseTLAB) {
1496     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1497     make_tlabs_parsable(true);
1498   }
1499 
1500   {
1501     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1502     ShenandoahClearLivenessClosure clc(this);
1503     heap_region_iterate(&clc);
1504   }
1505 
1506   // Make above changes visible to worker threads
1507   OrderAccess::fence();
1508 
1509   concurrentMark()->init_mark_roots();
1510 
1511   if (UseTLAB) {
1512     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1513     resize_all_tlabs();
1514   }
1515 
1516   if (ShenandoahPacing) {
1517     pacer()->setup_for_mark();
1518   }
1519 }
1520 
1521 void ShenandoahHeap::op_mark() {
1522   concurrentMark()->mark_from_roots();
1523 
1524   // Allocations happen during concurrent mark, record peak after the phase:
1525   shenandoahPolicy()->record_peak_occupancy();
1526 }
1527 
1528 void ShenandoahHeap::op_final_mark() {
1529   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1530 
  // It is critical that we evacuate roots right after finishing marking,
  // so that we don't get unmarked objects in the roots.
1534 
1535   if (! cancelled_concgc()) {
1536     concurrentMark()->finish_mark_from_roots();
1537     stop_concurrent_marking();
1538 
1539     {
1540       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1541       prepare_for_concurrent_evacuation();
1542     }
1543 
1544     // If collection set has candidates, start evacuation.
1545     // Otherwise, bypass the rest of the cycle.
1546     if (!collection_set()->is_empty()) {
1547       set_evacuation_in_progress(true);
1548       // From here on, we need to update references.
1549       set_has_forwarded_objects(true);
1550 
1551       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1552       evacuate_and_update_roots();
1553     }
1554 
1555     if (ShenandoahPacing) {
1556       pacer()->setup_for_evac();
1557     }
1558   } else {
1559     concurrentMark()->cancel();
1560     stop_concurrent_marking();
1561 
1562     if (process_references()) {
1563       // Abandon reference processing right away: pre-cleaning must have failed.
1564       ReferenceProcessor *rp = ref_processor();
1565       rp->disable_discovery();
1566       rp->abandon_partial_discovery();
1567       rp->verify_no_references_recorded();
1568     }
1569   }
1570 }
1571 
1572 void ShenandoahHeap::op_final_evac() {
1573   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1574 
1575   set_evacuation_in_progress(false);
1576   if (ShenandoahVerify) {
1577     verifier()->verify_after_evacuation();
1578   }
1579 }
1580 
void ShenandoahHeap::op_evac() {
1583   LogTarget(Trace, gc, region) lt_region;
1584   LogTarget(Trace, gc, cset) lt_cset;
1585 
1586   if (lt_region.is_enabled()) {
1587     ResourceMark rm;
1588     LogStream ls(lt_region);
1589     ls.print_cr("All available regions:");
1590     print_heap_regions_on(&ls);
1591   }
1592 
1593   if (lt_cset.is_enabled()) {
1594     ResourceMark rm;
1595     LogStream ls(lt_cset);
1596     ls.print_cr("Collection set ("SIZE_FORMAT" regions):", _collection_set->count());
1597     _collection_set->print_on(&ls);
1598 
1599     ls.print_cr("Free set:");
1600     _free_set->print_on(&ls);
1601   }
1602 
1603   ShenandoahParallelEvacuationTask task(this, _collection_set);
1604   workers()->run_task(&task);
1605 
1606   if (lt_cset.is_enabled()) {
1607     ResourceMark rm;
1608     LogStream ls(lt_cset);
1609     ls.print_cr("After evacuation collection set ("SIZE_FORMAT" regions):",
1610                 _collection_set->count());
1611     _collection_set->print_on(&ls);
1612 
1613     ls.print_cr("After evacuation free set:");
1614     _free_set->print_on(&ls);
1615   }
1616 
1617   if (lt_region.is_enabled()) {
1618     ResourceMark rm;
1619     LogStream ls(lt_region);
1620     ls.print_cr("All regions after evacuation:");
1621     print_heap_regions_on(&ls);
1622   }
1623 
1624   // Allocations happen during evacuation, record peak after the phase:
1625   shenandoahPolicy()->record_peak_occupancy();
1626 }
1627 
1628 void ShenandoahHeap::op_updaterefs() {
1629   update_heap_references(true);
1630 
1631   // Allocations happen during update-refs, record peak after the phase:
1632   shenandoahPolicy()->record_peak_occupancy();
1633 }
1634 
1635 void ShenandoahHeap::op_cleanup() {
1636   ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
1637   free_set()->recycle_trash();
1638 
1639   // Allocations happen during cleanup, record peak after the phase:
1640   shenandoahPolicy()->record_peak_occupancy();
1641 }
1642 
1643 void ShenandoahHeap::op_cleanup_bitmaps() {
1644   op_cleanup();
1645 
1646   ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1647   reset_next_mark_bitmap();
1648 
1649   // Allocations happen during bitmap cleanup, record peak after the phase:
1650   shenandoahPolicy()->record_peak_occupancy();
1651 }
1652 
void ShenandoahHeap::op_cleanup_traversal() {
1655   {
1656     ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1657     reset_next_mark_bitmap_traversal();
1658   }
1659 
1660   op_cleanup();
1661 
1662   // Allocations happen during bitmap cleanup, record peak after the phase:
1663   shenandoahPolicy()->record_peak_occupancy();
1664 }
1665 
1666 void ShenandoahHeap::op_preclean() {
1667   concurrentMark()->preclean_weak_refs();
1668 
1669   // Allocations happen during concurrent preclean, record peak after the phase:
1670   shenandoahPolicy()->record_peak_occupancy();
1671 }
1672 
1673 void ShenandoahHeap::op_init_traversal() {
1674   traversal_gc()->init_traversal_collection();
1675 }
1676 
1677 void ShenandoahHeap::op_traversal() {
1678   traversal_gc()->concurrent_traversal_collection();
1679 }
1680 
1681 void ShenandoahHeap::op_final_traversal() {
1682   traversal_gc()->final_traversal_collection();
1683 }
1684 
1685 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1686   full_gc()->do_it(cause);
1687 }
1688 
1689 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_concgc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to a Full GC.
1693 
1694   clear_cancelled_concgc();
1695 
1696   size_t used_before = used();
1697 
1698   switch (point) {
1699     case _degenerated_evac:
1700       // Not possible to degenerate from here, upgrade to Full GC right away.
1701       cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1702       op_degenerated_fail();
1703       return;
1704 
    // The cases below form a Duff's-device-like structure: together they describe the
    // actual GC cycle, and the switch enters it at different points, depending on which
    // concurrent phase had degenerated. Control intentionally falls through from one
    // case to the next.
1708 
1709     case _degenerated_traversal:
1710       {
1711         ShenandoahHeapLocker locker(lock());
1712         collection_set()->clear_current_index();
1713         for (size_t i = 0; i < collection_set()->count(); i++) {
1714           ShenandoahHeapRegion* r = collection_set()->next();
1715           r->make_regular_bypass();
1716         }
1717         collection_set()->clear();
1718       }
1719       op_final_traversal();
1720       op_cleanup_traversal();
1721       return;
1722 
1723     case _degenerated_outside_cycle:
1724       if (shenandoahPolicy()->can_do_traversal_gc()) {
1725         // Not possible to degenerate from here, upgrade to Full GC right away.
1726         cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1727         op_degenerated_fail();
1728         return;
1729       }
1730       op_init_mark();
1731       if (cancelled_concgc()) {
1732         op_degenerated_fail();
1733         return;
1734       }
1735 
1736     case _degenerated_mark:
1737       op_final_mark();
1738       if (cancelled_concgc()) {
1739         op_degenerated_fail();
1740         return;
1741       }
1742 
1743       op_cleanup();
1744 
      // If the heuristics decided to do a cycle with evacuation, this flag is set,
      // and we can do evacuation. Otherwise, it is the shortcut cycle.
1747       if (is_evacuation_in_progress()) {
1748         op_evac();
1749         if (cancelled_concgc()) {
1750           op_degenerated_fail();
1751           return;
1752         }
1753       }
1754 
      // If the heuristics decided to do a cycle with evacuation, this flag is set,
      // and we need to do update-refs. Otherwise, it is the shortcut cycle.
1757       if (has_forwarded_objects()) {
1758         op_init_updaterefs();
1759         if (cancelled_concgc()) {
1760           op_degenerated_fail();
1761           return;
1762         }
1763       }
1764 
1765     case _degenerated_updaterefs:
1766       if (has_forwarded_objects()) {
1767         op_final_updaterefs();
1768         if (cancelled_concgc()) {
1769           op_degenerated_fail();
1770           return;
1771         }
1772       }
1773 
1774       op_cleanup_bitmaps();
1775       break;
1776 
1777     default:
1778       ShouldNotReachHere();
1779   }
1780 
1781   if (ShenandoahVerify) {
1782     verifier()->verify_after_degenerated();
1783   }
1784 
1785   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1786   // because that probably means the heap is overloaded and/or fragmented.
1787   size_t used_after = used();
1788   size_t difference = (used_before > used_after) ? used_before - used_after : 0;
1789   if (difference < ShenandoahHeapRegion::region_size_words()) {
1790     cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1791     op_degenerated_futile();
1792   }
1793 }
1794 
1795 void ShenandoahHeap::op_degenerated_fail() {
1796   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1797   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1798   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1799 }
1800 
1801 void ShenandoahHeap::op_degenerated_futile() {
1802   log_info(gc)("Degenerated GC had not reclaimed enough, upgrading to Full GC");
1803   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1804   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1805 }
1806 
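// Called when concurrent marking completes normally: the "next" bitmap and
// top-at-mark-start data that marking just populated become the "complete"
// (authoritative) set, and the old "complete" storage is recycled as "next"
// for the following cycle.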
1807 void ShenandoahHeap::swap_mark_bitmaps() {
1808   // Swap bitmaps.
1809   MarkBitMap* tmp1 = _complete_mark_bit_map;
1810   _complete_mark_bit_map = _next_mark_bit_map;
1811   _next_mark_bit_map = tmp1;
1812 
1813   // Swap top-at-mark-start pointers
1814   HeapWord** tmp2 = _complete_top_at_mark_starts;
1815   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1816   _next_top_at_mark_starts = tmp2;
1817 
1818   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1819   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1820   _next_top_at_mark_starts_base = tmp3;
1821 }
1822 
1823 
1824 void ShenandoahHeap::stop_concurrent_marking() {
1825   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
  if (! cancelled_concgc()) {
    // Marking finished without cancellation: any reference updating that piggybacked
    // on marking is complete, so we can drop the forwarded-objects flag and publish
    // the freshly completed marking by swapping the bitmaps.
    set_has_forwarded_objects(false);
    swap_mark_bitmaps();
  }
1832   set_concurrent_mark_in_progress(false);
1833 
1834   LogTarget(Trace, gc, region) lt;
1835   if (lt.is_enabled()) {
1836     ResourceMark rm;
1837     LogStream ls(lt);
1838     ls.print_cr("Regions at stopping the concurrent mark:");
1839     print_heap_regions_on(&ls);
1840   }
1841 }
1842 
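// Flip the given gc-state bits for the whole VM: update the heap-side mask and
// publish the raw value to every Java thread, so barrier fast paths can poll it
// cheaply. Bits can be combined, e.g. set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, ...)
// below flips both at once.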
1843 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1844   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1845   _gc_state.set_cond(mask, value);
1846   JavaThread::set_gc_state_all_threads(_gc_state.raw_value());
1847 }
1848 
1849 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1850   set_gc_state_mask(MARKING, in_progress);
1851   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1852 }
1853 
1854 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1855    set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
1856    JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1857 }
1858 
1859 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1860   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1861   set_gc_state_mask(EVACUATION, in_progress);
1862 }
1863 
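// Objects are laid out with a Brooks forwarding pointer in the word before them:
// the allocation path reserves one extra word, and the usable object starts one
// fwd-pointer word past the raw allocation: [fwd ptr][object ...].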
1864 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1865   // Initialize Brooks pointer for the next object
1866   HeapWord* result = obj + BrooksPointer::word_size();
1867   BrooksPointer::initialize(oop(result));
1868   return result;
1869 }
1870 
1871 uint ShenandoahHeap::oop_extra_words() {
1872   return BrooksPointer::word_size();
1873 }
1874 
1875 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1876   _heap(ShenandoahHeap::heap_no_check()) {
1877 }
1878 
1879 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1880   _heap(ShenandoahHeap::heap_no_check()) {
1881 }
1882 
1883 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1884   if (oopDesc::is_null(obj)) {
1885     return false;
1886   }
1887   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_mark_in_progress() || _heap->is_concurrent_traversal_in_progress());
1889   return _heap->is_marked_next(obj);
1890 }
1891 
1892 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1893   if (oopDesc::is_null(obj)) {
1894     return false;
1895   }
1896   shenandoah_assert_not_forwarded(NULL, obj);
1897   return _heap->is_marked_next(obj);
1898 }
1899 
1900 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
1901   return has_forwarded_objects() ?
1902          (BoolObjectClosure*) &_forwarded_is_alive :
1903          (BoolObjectClosure*) &_is_alive;
1904 }
1905 
1906 void ShenandoahHeap::ref_processing_init() {
1907   MemRegion mr = reserved_region();
1908 
1909   _forwarded_is_alive.init(this);
1910   _is_alive.init(this);
1911   assert(_max_workers > 0, "Sanity");
1912 
1913   _ref_processor =
1914     new ReferenceProcessor(mr,    // span
1915                            ParallelRefProcEnabled,  // MT processing
1916                            _max_workers,            // Degree of MT processing
1917                            true,                    // MT discovery
1918                            _max_workers,            // Degree of MT discovery
1919                            false,                   // Reference discovery is not atomic
1920                            NULL);                   // No closure, should be installed before use
1921 
1922   shenandoah_assert_rp_isalive_not_installed();
1923 }
1924 
1925 
1926 GCTracer* ShenandoahHeap::tracer() {
1927   return shenandoahPolicy()->tracer();
1928 }
1929 
1930 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1931   return _free_set->used();
1932 }
1933 
1934 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
1935   if (try_cancel_concgc()) {
1936     FormatBuffer<> msg("Cancelling concurrent GC: %s", GCCause::to_string(cause));
1937     log_info(gc)("%s", msg.buffer());
1938     Events::log(Thread::current(), "%s", msg.buffer());
1939   }
1940 }
1941 
1942 uint ShenandoahHeap::max_workers() {
1943   return _max_workers;
1944 }
1945 
1946 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even when GC is running.
1948 
1949   // Step 0. Notify policy to disable event recording.
1950   _shenandoah_policy->record_shutdown();
1951 
1952   // Step 1. Notify control thread that we are in shutdown.
1953   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1954   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1955   _control_thread->prepare_for_graceful_shutdown();
1956 
1957   // Step 2. Notify GC workers that we are cancelling GC.
1958   cancel_concgc(GCCause::_shenandoah_stop_vm);
1959 
  // Step 3. Wait until the GC control thread exits normally.
1961   _control_thread->stop();
1962 
1963   // Step 4. Stop String Dedup thread if it is active
1964   if (ShenandoahStringDedup::is_enabled()) {
1965     ShenandoahStringDedup::stop();
1966   }
1967 }
1968 
1969 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1970   ShenandoahPhaseTimings::Phase phase_root =
1971           full_gc ?
1972           ShenandoahPhaseTimings::full_gc_purge :
1973           ShenandoahPhaseTimings::purge;
1974 
1975   ShenandoahPhaseTimings::Phase phase_unload =
1976           full_gc ?
1977           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1978           ShenandoahPhaseTimings::purge_class_unload;
1979 
1980   ShenandoahPhaseTimings::Phase phase_cldg =
1981           full_gc ?
1982           ShenandoahPhaseTimings::full_gc_purge_cldg :
1983           ShenandoahPhaseTimings::purge_cldg;
1984 
1985   ShenandoahPhaseTimings::Phase phase_par =
1986           full_gc ?
1987           ShenandoahPhaseTimings::full_gc_purge_par :
1988           ShenandoahPhaseTimings::purge_par;
1989 
1990   ShenandoahPhaseTimings::Phase phase_par_classes =
1991           full_gc ?
1992           ShenandoahPhaseTimings::full_gc_purge_par_classes :
1993           ShenandoahPhaseTimings::purge_par_classes;
1994 
1995   ShenandoahPhaseTimings::Phase phase_par_codecache =
1996           full_gc ?
1997           ShenandoahPhaseTimings::full_gc_purge_par_codecache :
1998           ShenandoahPhaseTimings::purge_par_codecache;
1999 
2000   ShenandoahPhaseTimings::Phase phase_par_rmt =
2001           full_gc ?
2002           ShenandoahPhaseTimings::full_gc_purge_par_rmt :
2003           ShenandoahPhaseTimings::purge_par_rmt;
2004 
2005   ShenandoahPhaseTimings::Phase phase_par_symbstring =
2006           full_gc ?
2007           ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
2008           ShenandoahPhaseTimings::purge_par_symbstring;
2009 
2010   ShenandoahPhaseTimings::Phase phase_par_sync =
2011           full_gc ?
2012           ShenandoahPhaseTimings::full_gc_purge_par_sync :
2013           ShenandoahPhaseTimings::purge_par_sync;
2014 
2015   ShenandoahGCPhase root_phase(phase_root);
2016 
2017   BoolObjectClosure* is_alive = is_alive_closure();
2018 
2019   bool purged_class;
2020 
2021   // Unload classes and purge SystemDictionary.
2022   {
2023     ShenandoahGCPhase phase(phase_unload);
2024     purged_class = SystemDictionary::do_unloading(is_alive,
2025                                                   gc_timer(),
2026                                                   false /* defer cleaning */);
2027   }
2028 
2029   {
2030     ShenandoahGCPhase phase(phase_par);
2031     uint active = _workers->active_workers();
2032     ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
2033     _workers->run_task(&unlink_task);
2034 
2035     ShenandoahPhaseTimings* p = phase_timings();
2036     ParallelCleaningTimes times = unlink_task.times();
2037 
2038     // "times" report total time, phase_tables_cc reports wall time. Divide total times
2039     // by active workers to get average time per worker, that would add up to wall time.
2040     p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
2041     p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
2042     p->record_phase_time(phase_par_rmt,        times.rmt_work_us() / active);
2043     p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
2044     p->record_phase_time(phase_par_sync,       times.sync_us() / active);
2045   }
2046 
2047   if (ShenandoahStringDedup::is_enabled()) {
2048     ShenandoahPhaseTimings::Phase phase_par_string_dedup =
2049             full_gc ?
2050             ShenandoahPhaseTimings::full_gc_purge_par_string_dedup :
2051             ShenandoahPhaseTimings::purge_par_string_dedup;
2052     ShenandoahGCPhase phase(phase_par_string_dedup);
2053     ShenandoahStringDedup::parallel_cleanup();
2054   }
2055 
2056 
2057   {
2058     ShenandoahGCPhase phase(phase_cldg);
2059     ClassLoaderDataGraph::purge();
2060   }
2061 }
2062 
2063 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2064   set_gc_state_mask(HAS_FORWARDED, cond);
2065 }
2066 
2067 void ShenandoahHeap::set_process_references(bool pr) {
2068   _process_references.set_cond(pr);
2069 }
2070 
2071 void ShenandoahHeap::set_unload_classes(bool uc) {
2072   _unload_classes.set_cond(uc);
2073 }
2074 
2075 bool ShenandoahHeap::process_references() const {
2076   return _process_references.is_set();
2077 }
2078 
2079 bool ShenandoahHeap::unload_classes() const {
2080   return _unload_classes.is_set();
2081 }
2082 
// FIXME: This should be in ShenandoahHeapRegionSet.
2084 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2085   size_t region_idx = r->region_number() + 1;
2086   ShenandoahHeapRegion* next = get_region(region_idx);
2087   guarantee(next->region_number() == region_idx, "region number must match");
2088   while (next->is_humongous()) {
2089     region_idx = next->region_number() + 1;
2090     next = get_region(region_idx);
2091     guarantee(next->region_number() == region_idx, "region number must match");
2092   }
2093   return next;
2094 }
2095 
2096 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2097   return _monitoring_support;
2098 }
2099 
2100 MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
2101   return _complete_mark_bit_map;
2102 }
2103 
2104 MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
2105   return _next_mark_bit_map;
2106 }
2107 
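// The accessors below expose raw addresses of GC flags and tables, so that
// compiler-generated barrier and interpreter code can poll them directly,
// without a runtime call.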
2108 address ShenandoahHeap::in_cset_fast_test_addr() {
2109   ShenandoahHeap* heap = ShenandoahHeap::heap();
2110   assert(heap->collection_set() != NULL, "Sanity");
2111   return (address) heap->collection_set()->biased_map_address();
2112 }
2113 
2114 address ShenandoahHeap::cancelled_concgc_addr() {
2115   return (address) ShenandoahHeap::heap()->_cancelled_concgc.addr_of();
2116 }
2117 
2118 address ShenandoahHeap::gc_state_addr() {
2119   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2120 }
2121 
2122 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2123   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2124 }
2125 
2126 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2127   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2128 }
2129 
2130 ShenandoahPacer* ShenandoahHeap::pacer() const {
2131   assert (_pacer != NULL, "sanity");
2132   return _pacer;
2133 }
2134 
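// The top-at-mark-start (TAMS) tables below are indexed by shifting the absolute
// region base address by the region size shift. The *_top_at_mark_starts pointers
// are pre-biased versions of the *_base arrays (see swap_mark_bitmaps() above), so
// the absolute address works as an index without subtracting the heap base first;
// region bases one region size apart map to consecutive slots.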
2135 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2136   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2137   _next_top_at_mark_starts[index] = addr;
2138 }
2139 
2140 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2141   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2142   return _next_top_at_mark_starts[index];
2143 }
2144 
2145 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2146   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2147   _complete_top_at_mark_starts[index] = addr;
2148 }
2149 
2150 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2151   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2152   return _complete_top_at_mark_starts[index];
2153 }
2154 
2155 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2156   _degenerated_gc_in_progress.set_cond(in_progress);
2157 }
2158 
2159 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2160   _full_gc_in_progress.set_cond(in_progress);
2161 }
2162 
2163 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2164   assert (is_full_gc_in_progress(), "should be");
2165   _full_gc_move_in_progress.set_cond(in_progress);
2166 }
2167 
2168 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2169   set_gc_state_mask(UPDATEREFS, in_progress);
2170 }
2171 
2172 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2173   ShenandoahCodeRoots::add_nmethod(nm);
2174 }
2175 
2176 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2177   ShenandoahCodeRoots::remove_nmethod(nm);
2178 }
2179 
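// Object pinning: making the containing region pinned keeps it out of the
// collection set, so the object cannot move while the pin is held (e.g. for
// JNI critical regions).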
2180 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2181   o = BarrierSet::barrier_set()->write_barrier(o);
2182   ShenandoahHeapLocker locker(lock());
2183   heap_region_containing(o)->make_pinned();
2184   return o;
2185 }
2186 
2187 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2188   o = BarrierSet::barrier_set()->read_barrier(o);
2189   ShenandoahHeapLocker locker(lock());
2190   heap_region_containing(o)->make_unpinned();
2191 }
2192 
2193 GCTimer* ShenandoahHeap::gc_timer() const {
2194   return _gc_timer;
2195 }
2196 
2197 #ifdef ASSERT
2198 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2199   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2200 
2201   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2202     if (UseDynamicNumberOfGCThreads ||
2203         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2204       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2205     } else {
2206       // Use ParallelGCThreads inside safepoints
      assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2208     }
2209   } else {
2210     if (UseDynamicNumberOfGCThreads ||
2211         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2212       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2213     } else {
2214       // Use ConcGCThreads outside safepoints
2215       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2216     }
2217   }
2218 }
2219 #endif
2220 
2221 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
2222   return _connection_matrix;
2223 }
2224 
2225 ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() {
2226   return _traversal_gc;
2227 }
2228 
2229 ShenandoahVerifier* ShenandoahHeap::verifier() {
2230   guarantee(ShenandoahVerify, "Should be enabled");
2231   assert (_verifier != NULL, "sanity");
2232   return _verifier;
2233 }
2234 
2235 template<class T>
2236 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2237 private:
2238   T cl;
2239   ShenandoahHeap* _heap;
2240   ShenandoahRegionIterator* _regions;
2241   bool _concurrent;
2242 public:
2243   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2244     AbstractGangTask("Concurrent Update References Task"),
2245     cl(T()),
2246     _heap(ShenandoahHeap::heap()),
2247     _regions(regions),
2248     _concurrent(concurrent) {
2249   }
2250 
2251   void work(uint worker_id) {
2252     SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
2253     ShenandoahHeapRegion* r = _regions->next();
2254     while (r != NULL) {
2255       if (_heap->in_collection_set(r)) {
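        // Collection set regions are being evacuated, so there are no references to
        // update in them; instead, clear their range of the completed marking bitmap,
        // piggybacking bitmap cleanup on the update-refs walk.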
2256         HeapWord* bottom = r->bottom();
2257         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2258         if (top > bottom) {
2259           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2260         }
2261       } else {
2262         if (r->is_active()) {
2263           _heap->marked_object_oop_safe_iterate(r, &cl);
2264           if (ShenandoahPacing) {
2265             _heap->pacer()->report_updaterefs(r->get_live_data_words());
2266           }
2267         }
2268       }
2269       if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
2270         return;
2271       }
2272       r = _regions->next();
2273     }
2274   }
2275 };
2276 
2277 void ShenandoahHeap::update_heap_references(bool concurrent) {
2278   if (UseShenandoahMatrix) {
2279     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(&_update_refs_iterator, concurrent);
2280     workers()->run_task(&task);
2281   } else {
2282     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2283     workers()->run_task(&task);
2284   }
2285 }
2286 
2287 void ShenandoahHeap::op_init_updaterefs() {
2288   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2289 
2290   if (ShenandoahVerify) {
2291     verifier()->verify_before_updaterefs();
2292   }
2293 
2294   set_evacuation_in_progress(false);
2295   set_update_refs_in_progress(true);
2296   make_tlabs_parsable(true);
2297   if (UseShenandoahMatrix) {
2298     connection_matrix()->clear_all();
2299   }
  for (size_t i = 0; i < num_regions(); i++) {
2301     ShenandoahHeapRegion* r = get_region(i);
2302     r->set_concurrent_iteration_safe_limit(r->top());
2303   }
2304 
2305   // Reset iterator.
2306   _update_refs_iterator = ShenandoahRegionIterator();
2307 
2308   if (ShenandoahPacing) {
2309     pacer()->setup_for_updaterefs();
2310   }
2311 }
2312 
2313 void ShenandoahHeap::op_final_updaterefs() {
2314   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2315 
2316   // Check if there is left-over work, and finish it
2317   if (_update_refs_iterator.has_next()) {
2318     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2319 
2320     // Finish updating references where we left off.
2321     clear_cancelled_concgc();
2322     update_heap_references(false);
2323   }
2324 
  // Clear cancelled conc GC, if set. On the cancellation path, the block above would
  // have handled everything. On degenerated paths, cancelled GC would not be set anyway.
2327   if (cancelled_concgc()) {
2328     clear_cancelled_concgc();
2329   }
2330   assert(!cancelled_concgc(), "Should have been done right before");
2331 
2332   concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2333 
2334   // Allocations might have happened before we STWed here, record peak:
2335   shenandoahPolicy()->record_peak_occupancy();
2336 
2337   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2338 
2339   trash_cset_regions();
2340   set_has_forwarded_objects(false);
2341 
2342   if (ShenandoahVerify) {
2343     verifier()->verify_after_updaterefs();
2344   }
2345 
2346   {
2347     ShenandoahHeapLocker locker(lock());
2348     _free_set->rebuild();
2349   }
2350 
2351   set_update_refs_in_progress(false);
2352 }
2353 
2354 void ShenandoahHeap::set_alloc_seq_gc_start() {
2355   // Take next number, the start seq number is inclusive
2356   _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::seqnum_current_alloc() + 1;
2357 }
2358 
2359 void ShenandoahHeap::set_alloc_seq_gc_end() {
2360   // Take current number, the end seq number is also inclusive
2361   _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::seqnum_current_alloc();
2362 }
2363 
2364 
2365 #ifdef ASSERT
2366 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2367   _lock.assert_owned_by_current_thread();
2368 }
2369 
2370 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2371   _lock.assert_not_owned_by_current_thread();
2372 }
2373 
2374 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2375   _lock.assert_owned_by_current_thread_or_safepoint();
2376 }
2377 #endif
2378 
2379 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2380   print_on(st);
2381   print_heap_regions_on(st);
2382 }
2383 
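// Mark bitmap memory is committed in slices, each covering _bitmap_regions_per_slice
// regions. For example, with 8 regions per slice, regions 0..7 share slice 0 and
// regions 8..15 share slice 1; a slice may only be uncommitted when no region in
// its group is committed.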
2384 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2385   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2386 
2387   size_t regions_from = _bitmap_regions_per_slice * slice;
2388   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2389   for (size_t g = regions_from; g < regions_to; g++) {
2390     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2391     if (skip_self && g == r->region_number()) continue;
2392     if (get_region(g)->is_committed()) {
2393       return true;
2394     }
2395   }
2396   return false;
2397 }
2398 
2399 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2400   assert_heaplock_owned_by_current_thread();
2401 
2402   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, so we can exit right away.
2405     return true;
2406   }
2407 
2408   // Commit the bitmap slice:
2409   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2410   size_t off = _bitmap_bytes_per_slice * slice;
2411   size_t len = _bitmap_bytes_per_slice;
2412   if (!os::commit_memory((char*)_bitmap0_region.start() + off, len, false)) {
2413     return false;
2414   }
2415   if (!os::commit_memory((char*)_bitmap1_region.start() + off, len, false)) {
2416     return false;
2417   }
2418   return true;
2419 }
2420 
2421 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2422   assert_heaplock_owned_by_current_thread();
2423 
2424   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2427     return true;
2428   }
2429 
2430   // Uncommit the bitmap slice:
2431   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2432   size_t off = _bitmap_bytes_per_slice * slice;
2433   size_t len = _bitmap_bytes_per_slice;
2434   if (!os::uncommit_memory((char*)_bitmap0_region.start() + off, len)) {
2435     return false;
2436   }
2437   if (!os::uncommit_memory((char*)_bitmap1_region.start() + off, len)) {
2438     return false;
2439   }
2440   return true;
2441 }
2442 
2443 bool ShenandoahHeap::idle_bitmap_slice(ShenandoahHeapRegion *r) {
2444   assert_heaplock_owned_by_current_thread();
2445   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2446 
2447   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2450     return true;
2451   }
2452 
2453   // Idle the bitmap slice:
2454   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2455   size_t off = _bitmap_bytes_per_slice * slice;
2456   size_t len = _bitmap_bytes_per_slice;
2457   if (!os::idle_memory((char*)_bitmap0_region.start() + off, len)) {
2458     return false;
2459   }
2460   if (!os::idle_memory((char*)_bitmap1_region.start() + off, len)) {
2461     return false;
2462   }
2463   return true;
2464 }
2465 
2466 void ShenandoahHeap::activate_bitmap_slice(ShenandoahHeapRegion* r) {
2467   assert_heaplock_owned_by_current_thread();
2468   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2469   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2470   size_t off = _bitmap_bytes_per_slice * slice;
2471   size_t len = _bitmap_bytes_per_slice;
2472   os::activate_memory((char*)_bitmap0_region.start() + off, len);
2473   os::activate_memory((char*)_bitmap1_region.start() + off, len);
2474 }
2475 
2476 void ShenandoahHeap::safepoint_synchronize_begin() {
2477   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2478     SuspendibleThreadSet::synchronize();
2479   }
2480 }
2481 
2482 void ShenandoahHeap::safepoint_synchronize_end() {
2483   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2484     SuspendibleThreadSet::desynchronize();
2485   }
2486 }
2487 
2488 void ShenandoahHeap::vmop_entry_init_mark() {
2489   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2490   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2491   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2492 
2493   try_inject_alloc_failure();
2494   VM_ShenandoahInitMark op;
2495   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2496 }
2497 
2498 void ShenandoahHeap::vmop_entry_final_mark() {
2499   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2500   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2501   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2502 
2503   try_inject_alloc_failure();
2504   VM_ShenandoahFinalMarkStartEvac op;
2505   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2506 }
2507 
2508 void ShenandoahHeap::vmop_entry_final_evac() {
2509   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2510   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2511   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2512 
2513   VM_ShenandoahFinalEvac op;
2514   VMThread::execute(&op); // jump to entry_final_evac under safepoint
2515 }
2516 
2517 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2518   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2519   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2520   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2521 
2522   try_inject_alloc_failure();
2523   VM_ShenandoahInitUpdateRefs op;
2524   VMThread::execute(&op);
2525 }
2526 
2527 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2528   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2529   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2530   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2531 
2532   try_inject_alloc_failure();
2533   VM_ShenandoahFinalUpdateRefs op;
2534   VMThread::execute(&op);
2535 }
2536 
2537 void ShenandoahHeap::vmop_entry_init_traversal() {
2538   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2539   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2540   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2541 
2542   try_inject_alloc_failure();
2543   VM_ShenandoahInitTraversalGC op;
2544   VMThread::execute(&op);
2545 }
2546 
2547 void ShenandoahHeap::vmop_entry_final_traversal() {
2548   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2549   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2550   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2551 
2552   try_inject_alloc_failure();
2553   VM_ShenandoahFinalTraversalGC op;
2554   VMThread::execute(&op);
2555 }
2556 
2557 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2558   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2559   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2560   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2561 
2562   try_inject_alloc_failure();
2563   VM_ShenandoahFullGC op(cause);
2564   VMThread::execute(&op);
2565 }
2566 
2567 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2568   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2569   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2570   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2571 
2572   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2573   VMThread::execute(&degenerated_gc);
2574 }
2575 
2576 void ShenandoahHeap::entry_init_mark() {
2577   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2578   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2579 
2580   FormatBuffer<> msg("Pause Init Mark%s%s%s",
2581                      has_forwarded_objects() ? " (update refs)"    : "",
2582                      process_references() ?    " (process refs)"   : "",
2583                      unload_classes() ?        " (unload classes)" : "");
2584   GCTraceTime(Info, gc) time(msg, gc_timer());
2585   EventMark em("%s", msg.buffer());
2586 
2587   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_init_marking());
2588 
2589   op_init_mark();
2590 }
2591 
2592 void ShenandoahHeap::entry_final_mark() {
2593   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2594   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2595 
2596   FormatBuffer<> msg("Pause Final Mark%s%s%s",
2597                      has_forwarded_objects() ? " (update refs)"    : "",
2598                      process_references() ?    " (process refs)"   : "",
2599                      unload_classes() ?        " (unload classes)" : "");
2600   GCTraceTime(Info, gc) time(msg, gc_timer());
2601   EventMark em("%s", msg.buffer());
2602 
2603   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_marking());
2604 
2605   op_final_mark();
2606 }
2607 
2608 void ShenandoahHeap::entry_final_evac() {
2609   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2610   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2611 
2612   FormatBuffer<> msg("Pause Final Evac");
2613   GCTraceTime(Info, gc) time(msg, gc_timer());
2614   EventMark em("%s", msg.buffer());
2615 
2616   op_final_evac();
2617 }
2618 
2619 void ShenandoahHeap::entry_init_updaterefs() {
2620   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2621   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2622 
2623   static const char* msg = "Pause Init Update Refs";
2624   GCTraceTime(Info, gc) time(msg, gc_timer());
2625   EventMark em("%s", msg);
2626 
2627   // No workers used in this phase, no setup required
2628 
2629   op_init_updaterefs();
2630 }
2631 
2632 void ShenandoahHeap::entry_final_updaterefs() {
2633   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2634   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2635 
2636   static const char* msg = "Pause Final Update Refs";
2637   GCTraceTime(Info, gc) time(msg, gc_timer());
2638   EventMark em("%s", msg);
2639 
2640   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_update_ref());
2641 
2642   op_final_updaterefs();
2643 }
2644 
2645 void ShenandoahHeap::entry_init_traversal() {
2646   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2647   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2648 
2649   static const char* msg = "Pause Init Traversal";
2650   GCTraceTime(Info, gc) time(msg, gc_timer());
2651   EventMark em("%s", msg);
2652 
2653   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2654 
2655   op_init_traversal();
2656 }
2657 
2658 void ShenandoahHeap::entry_final_traversal() {
2659   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2660   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2661 
2662   static const char* msg = "Pause Final Traversal";
2663   GCTraceTime(Info, gc) time(msg, gc_timer());
2664   EventMark em("%s", msg);
2665 
2666   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2667 
2668   op_final_traversal();
2669 }
2670 
2671 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2672   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2673   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2674 
2675   static const char* msg = "Pause Full";
2676   GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2677   EventMark em("%s", msg);
2678 
2679   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_fullgc());
2680 
2681   op_full(cause);
2682 }
2683 
2684 void ShenandoahHeap::entry_degenerated(int point) {
2685   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2686   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2687 
2688   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2689   FormatBuffer<> msg("Pause Degenerated GC (%s)", degen_point_to_string(dpoint));
2690   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2691   EventMark em("%s", msg.buffer());
2692 
2693   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated());
2694 
2695   set_degenerated_gc_in_progress(true);
2696   op_degenerated(dpoint);
2697   set_degenerated_gc_in_progress(false);
2698 }
2699 
2700 void ShenandoahHeap::entry_mark() {
2701   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2702 
2703   FormatBuffer<> msg("Concurrent marking%s%s%s",
2704                      has_forwarded_objects() ? " (update refs)"    : "",
2705                      process_references() ?    " (process refs)"   : "",
2706                      unload_classes() ?        " (unload classes)" : "");
2707   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2708   EventMark em("%s", msg.buffer());
2709 
2710   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_marking());
2711 
2712   try_inject_alloc_failure();
2713   op_mark();
2714 }
2715 
2716 void ShenandoahHeap::entry_evac() {
2717   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2718   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2719 
2720   static const char* msg = "Concurrent evacuation";
2721   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2722   EventMark em("%s", msg);
2723 
2724   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_evac());
2725 
2726   try_inject_alloc_failure();
2727   op_evac();
2728 }
2729 
2730 void ShenandoahHeap::entry_updaterefs() {
2731   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2732 
2733   static const char* msg = "Concurrent update references";
2734   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2735   EventMark em("%s", msg);
2736 
2737   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref());
2738 
2739   try_inject_alloc_failure();
2740   op_updaterefs();
2741 }
2742 void ShenandoahHeap::entry_cleanup() {
2743   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2744 
2745   static const char* msg = "Concurrent cleanup";
2746   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2747   EventMark em("%s", msg);
2748 
2749   // This phase does not use workers, no need for setup
2750 
2751   try_inject_alloc_failure();
2752   op_cleanup();
2753 }
2754 
2755 void ShenandoahHeap::entry_cleanup_traversal() {
2756   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2757 
2758   static const char* msg = "Concurrent cleanup";
2759   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2760   EventMark em("%s", msg);
2761 
2762   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2763 
2764   try_inject_alloc_failure();
2765   op_cleanup_traversal();
2766 }
2767 
2768 void ShenandoahHeap::entry_cleanup_bitmaps() {
2769   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2770 
2771   static const char* msg = "Concurrent cleanup";
2772   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2773   EventMark em("%s", msg);
2774 
2775   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup());
2776 
2777   try_inject_alloc_failure();
2778   op_cleanup_bitmaps();
2779 }
2780 
2781 void ShenandoahHeap::entry_preclean() {
2782   if (ShenandoahPreclean && process_references()) {
2783     static const char* msg = "Concurrent precleaning";
2784     GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2785     EventMark em("%s", msg);
2786 
2787     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2788 
2789     ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_preclean());
2790 
2791     try_inject_alloc_failure();
2792     op_preclean();
2793   }
2794 }
2795 
2796 void ShenandoahHeap::entry_traversal() {
2797   static const char* msg = "Concurrent traversal";
2798   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2799   EventMark em("%s", msg);
2800 
2801   TraceCollectorStats tcs(is_minor_gc() ? monitoring_support()->partial_collection_counters()
2802                                         : monitoring_support()->concurrent_collection_counters());
2803 
2804   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2805 
2806   try_inject_alloc_failure();
2807   op_traversal();
2808 }
2809 
2810 void ShenandoahHeap::try_inject_alloc_failure() {
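  // Roughly 5% of calls (random value 951..999 out of 1000) inject a failure:
  // raise the flag, give mutators and GC a moment to react, then check whether
  // the GC actually got cancelled.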
2811   if (ShenandoahAllocFailureALot && !cancelled_concgc() && ((os::random() % 1000) > 950)) {
2812     _inject_alloc_failure.set();
2813     os::naked_short_sleep(1);
2814     if (cancelled_concgc()) {
2815       log_info(gc)("Allocation failure was successfully injected");
2816     }
2817   }
2818 }
2819 
2820 bool ShenandoahHeap::should_inject_alloc_failure() {
2821   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2822 }
2823 
2824 void ShenandoahHeap::initialize_serviceability() {
2825   _memory_pool = new ShenandoahMemoryPool(this);
2826   _cycle_memory_manager.add_pool(_memory_pool);
2827   _stw_memory_manager.add_pool(_memory_pool);
2828 }
2829 
2830 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2831   GrowableArray<GCMemoryManager*> memory_managers(2);
2832   memory_managers.append(&_cycle_memory_manager);
2833   memory_managers.append(&_stw_memory_manager);
2834   return memory_managers;
2835 }
2836 
2837 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2838   GrowableArray<MemoryPool*> memory_pools(1);
2839   memory_pools.append(_memory_pool);
2840   return memory_pools;
2841 }
2842 
2843 void ShenandoahHeap::enter_evacuation() {
2844   _oom_evac_handler.enter_evacuation();
2845 }
2846 
2847 void ShenandoahHeap::leave_evacuation() {
2848   _oom_evac_handler.leave_evacuation();
2849 }
2850 
2851 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2852   _index(0),
2853   _heap(ShenandoahHeap::heap()) {}
2854 
2855 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2856   _index(0),
2857   _heap(heap) {}
2858 
2859 ShenandoahRegionIterator& ShenandoahRegionIterator::operator=(const ShenandoahRegionIterator& o) {
2860   _index = o._index;
2861   assert(_heap == o._heap, "must be same");
2862   return *this;
2863 }
2864 
2865 bool ShenandoahRegionIterator::has_next() const {
2866   return _index < _heap->num_regions();
2867 }
2868 
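// Convenience overload: walk the regions in index order with a fresh iterator,
// stopping as soon as the closure returns true.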
2869 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure& cl) const {
2870   ShenandoahRegionIterator regions;
2871   ShenandoahHeapRegion* r = regions.next();
2872   while (r != NULL) {
2873     if (cl.heap_region_do(r)) {
2874       break;
2875     }
2876     r = regions.next();
2877   }
2878 }
2879 
2880 bool ShenandoahHeap::is_minor_gc() const {
2881   return _gc_cycle_mode.get() == MINOR;
2882 }
2883 
2884 bool ShenandoahHeap::is_major_gc() const {
2885   return _gc_cycle_mode.get() == MAJOR;
2886 }
2887 
2888 void ShenandoahHeap::set_cycle_mode(GCCycleMode gc_cycle_mode) {
2889   _gc_cycle_mode.set(gc_cycle_mode);
2890 }
2891 
2892 char ShenandoahHeap::gc_state() {
2893   return _gc_state.raw_value();
2894 }