1 /*
   2  * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/parallelCleaning.hpp"
  30 
  31 #include "gc/shenandoah/brooksPointer.hpp"
  32 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  37 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  38 #include "gc/shenandoah/shenandoahConcurrentThread.hpp"
  39 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  40 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  44 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  45 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  46 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  47 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  48 #include "gc/shenandoah/shenandoahPartialGC.hpp"
  49 #include "gc/shenandoah/shenandoahPacer.hpp"
  50 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  51 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
  53 #include "gc/shenandoah/shenandoahUtils.hpp"
  54 #include "gc/shenandoah/shenandoahVerifier.hpp"
  55 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  56 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  57 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  58 
  59 #include "runtime/vmThread.hpp"
  60 #include "services/mallocTracker.hpp"
  61 
  62 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  63 
  64 #ifdef ASSERT
  65 template <class T>
  66 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  67   T o = RawAccess<>::oop_load(p);
  68   if (! CompressedOops::is_null(o)) {
  69     oop obj = CompressedOops::decode_not_null(o);
  70     shenandoah_assert_not_forwarded(p, obj);
  71   }
  72 }
  73 
  74 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
  75 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
  76 #endif
  77 
  78 const char* ShenandoahHeap::name() const {
  79   return "Shenandoah";
  80 }
  81 
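// Pre-touches the heap regions and the matching slices of both mark bitmaps in parallel, so the
// backing pages are faulted in by worker threads rather than a single thread (NUMA-friendly).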
  82 class ShenandoahPretouchTask : public AbstractGangTask {
  83 private:
  84   ShenandoahRegionIterator _regions;
  85   const size_t _bitmap_size;
  86   const size_t _page_size;
  87   char* _bitmap0_base;
  88   char* _bitmap1_base;
  89 public:
  ShenandoahPretouchTask(ShenandoahRegionIterator regions,
                         char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch"),
    _regions(regions),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {}
  99 
 100   virtual void work(uint worker_id) {
 101     ShenandoahHeapRegion* r = _regions.next();
 102     while (r != NULL) {
 103       log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 104                           r->region_number(), p2i(r->bottom()), p2i(r->end()));
 105       os::pretouch_memory(r->bottom(), r->end(), _page_size);
 106 
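      // Compute the byte range of the mark bitmaps that covers this region; heap_map_factor()
      // is the number of heap bytes mapped by a single bitmap byte.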
 107       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 108       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);
 110 
 111       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 112                           r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
 113       os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);
 114 
 115       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 116                           r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
 117       os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);
 118 
 119       r = _regions.next();
 120     }
 121   }
 122 };
 123 
 124 jint ShenandoahHeap::initialize() {
 125 
 126   BrooksPointer::initial_checks();
 127 
 128   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 129   size_t max_byte_size = collector_policy()->max_heap_byte_size();
 130   size_t heap_alignment = collector_policy()->heap_alignment();
 131 
 132   if (ShenandoahAlwaysPreTouch) {
 133     // Enabled pre-touch means the entire heap is committed right away.
 134     init_byte_size = max_byte_size;
 135   }
 136 
 137   Universe::check_alignment(max_byte_size,
 138                             ShenandoahHeapRegion::region_size_bytes(),
 139                             "shenandoah heap");
 140   Universe::check_alignment(init_byte_size,
 141                             ShenandoahHeapRegion::region_size_bytes(),
 142                             "shenandoah heap");
 143 
 144   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
 145                                                  heap_alignment);
 146   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 147 
 148   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 149   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
 150 
 151   _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
 152   size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
 153   _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
 154   _committed = _initial_size;
 155 
 156   log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
 157   if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
 158     vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
 159   }
 160 
 161   size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
 162   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 163 
 164   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 165   _free_set = new ShenandoahFreeSet(this, _num_regions);
 166 
 167   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
 168 
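  // The top-at-mark-start (TAMS) arrays are biased by the heap base address, so that they can
  // be indexed directly with (address >> region_size_bytes_shift()) without subtracting the base.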
 169   _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 170   _next_top_at_mark_starts = _next_top_at_mark_starts_base -
 171                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 172 
 173   _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 174   _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
 175                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 176 
 177   if (ShenandoahPacing) {
 178     _pacer = new ShenandoahPacer(this);
 179     _pacer->setup_for_idle();
 180   } else {
 181     _pacer = NULL;
 182   }
 183 
 184   {
 185     ShenandoahHeapLocker locker(lock());
 186     for (size_t i = 0; i < _num_regions; i++) {
 187       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
 188                                                          (HeapWord*) pgc_rs.base() + reg_size_words * i,
 189                                                          reg_size_words,
 190                                                          i,
 191                                                          i < num_committed_regions);
 192 
 193       _complete_top_at_mark_starts_base[i] = r->bottom();
 194       _next_top_at_mark_starts_base[i] = r->bottom();
 195       _regions[i] = r;
 196       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 197     }
 198 
 199     _free_set->rebuild();
 200   }
 201 
 202   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));
 204 
 205   LogTarget(Trace, gc, region) lt;
 206   if (lt.is_enabled()) {
 207     ResourceMark rm;
 208     LogStream ls(lt);
 209     log_trace(gc, region)("All Regions");
 210     print_heap_regions_on(&ls);
 211     log_trace(gc, region)("Free Regions");
 212     _free_set->print_on(&ls);
 213   }
 214 
  // The call below uses machinery (the SATB queue set) that lives in G1, but probably
  // belongs in a shared location.
 217   ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 218                                                SATB_Q_FL_lock,
 219                                                20 /*G1SATBProcessCompletedThreshold */,
 220                                                Shared_SATB_Q_lock);
 221 
  // Reserve space for the complete and next mark bitmaps.
 223   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 224   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 225   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 226   _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
 227 
 228   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 229 
 230   guarantee(bitmap_bytes_per_region != 0,
 231             "Bitmap bytes per region should not be zero");
 232   guarantee(is_power_of_2(bitmap_bytes_per_region),
 233             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 234 
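  // Bitmap commits are done in page-sized slices. If one page covers the bitmap of several
  // regions, those regions share a slice; otherwise each region gets its own slice.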
 235   if (bitmap_page_size > bitmap_bytes_per_region) {
 236     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 237     _bitmap_bytes_per_slice = bitmap_page_size;
 238   } else {
 239     _bitmap_regions_per_slice = 1;
 240     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 241   }
 242 
 243   guarantee(_bitmap_regions_per_slice >= 1,
 244             "Should have at least one region per slice: " SIZE_FORMAT,
 245             _bitmap_regions_per_slice);
 246 
 247   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 248             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 249             _bitmap_bytes_per_slice, bitmap_page_size);
 250 
 251   ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
 252   MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
 253   _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
 254 
 255   ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
 256   MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
 257   _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
 258 
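  // Commit only the bitmap slices that back the initially committed regions.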
 259   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 260                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 261   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 262   os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
 263                             "couldn't allocate initial bitmap");
 264   os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
 265                             "couldn't allocate initial bitmap");
 266 
 267   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 268 
 269   if (ShenandoahVerify) {
 270     ReservedSpace verify_bitmap(_bitmap_size, page_size);
 271     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
 272                               "couldn't allocate verification bitmap");
 273     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 274     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 275     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 276     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 277   }
 278 
 279   if (ShenandoahAlwaysPreTouch) {
 280     assert (!AlwaysPreTouch, "Should have been overridden");
 281 
    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before initialize() below zeroes it with the initializing thread. For any given region,
    // we touch the region and the corresponding bitmaps from the same thread.
 285 
 286     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 287                        _num_regions, page_size);
 288     ShenandoahPretouchTask cl(region_iterator(), bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
 289     _workers->run_task(&cl);
 290   }
 291 
 292   _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
 293   _complete_mark_bit_map = &_mark_bit_map0;
 294 
 295   _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
 296   _next_mark_bit_map = &_mark_bit_map1;
 297 
 298   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 299   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 300   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 301   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 302   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 303 
 304   if (UseShenandoahMatrix) {
 305     _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
 306   } else {
 307     _connection_matrix = NULL;
 308   }
 309 
 310   _partial_gc = _shenandoah_policy->can_do_partial_gc() ?
 311                 new ShenandoahPartialGC(this, _num_regions) :
 312                 NULL;
 313 
 314   _traversal_gc = _shenandoah_policy->can_do_traversal_gc() ?
 315                 new ShenandoahTraversalGC(this, _num_regions) :
 316                 NULL;
 317 
 318   _monitoring_support = new ShenandoahMonitoringSupport(this);
 319 
 320   _phase_timings = new ShenandoahPhaseTimings();
 321 
 322   if (ShenandoahAllocationTrace) {
 323     _alloc_tracker = new ShenandoahAllocTracker();
 324   }
 325 
 326   ShenandoahStringDedup::initialize();
 327 
 328   _concurrent_gc_thread = new ShenandoahConcurrentThread();
 329 
 330   ShenandoahCodeRoots::initialize();
 331 
 332   log_info(gc, init)("Safepointing mechanism: %s",
 333                      SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
 334                      (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
 335 
 336   return JNI_OK;
 337 }
 338 
 339 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 340   CollectedHeap(),
 341   _shenandoah_policy(policy),
 342   _soft_ref_policy(),
 343   _regions(NULL),
 344   _free_set(NULL),
 345   _collection_set(NULL),
 346   _update_refs_iterator(ShenandoahRegionIterator(this)),
 347   _bytes_allocated_since_gc_start(0),
 348   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 349   _ref_processor(NULL),
 350   _next_top_at_mark_starts(NULL),
 351   _next_top_at_mark_starts_base(NULL),
 352   _complete_top_at_mark_starts(NULL),
 353   _complete_top_at_mark_starts_base(NULL),
 354   _mark_bit_map0(),
 355   _mark_bit_map1(),
 356   _aux_bit_map(),
 357   _connection_matrix(NULL),
 358   _verifier(NULL),
 359   _pacer(NULL),
 360   _used_at_last_gc(0),
 361   _alloc_seq_at_last_gc_start(0),
 362   _alloc_seq_at_last_gc_end(0),
 363   _safepoint_workers(NULL),
 364 #ifdef ASSERT
 365   _heap_expansion_count(0),
 366 #endif
 367   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 368   _phase_timings(NULL),
 369   _alloc_tracker(NULL),
 370   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 371   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 372   _memory_pool(NULL)
 373 {
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
 376   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 377 
 378   _scm = new ShenandoahConcurrentMark();
 379   _full_gc = new ShenandoahMarkCompact();
 380   _used = 0;
 381 
 382   _max_workers = MAX2(_max_workers, 1U);
 383   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 384                             /* are_GC_task_threads */true,
 385                             /* are_ConcurrentGC_threads */false);
 386   if (_workers == NULL) {
 387     vm_exit_during_initialization("Failed necessary allocation.");
 388   } else {
 389     _workers->initialize_workers();
 390   }
 391 
 392   if (ParallelSafepointCleanupThreads > 1) {
 393     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 394                                                 ParallelSafepointCleanupThreads,
 395                                                 false, false);
 396     _safepoint_workers->initialize_workers();
 397   }
 398 }
 399 
 400 class ShenandoahResetNextBitmapTask : public AbstractGangTask {
 401 private:
 402   ShenandoahRegionIterator _regions;
 403 
 404 public:
 405   ShenandoahResetNextBitmapTask(ShenandoahRegionIterator regions) :
 406     AbstractGangTask("Parallel Reset Bitmap Task"),
 407     _regions(regions) {}
 408 
 409   void work(uint worker_id) {
 410     ShenandoahHeapRegion* region = _regions.next();
 411     ShenandoahHeap* heap = ShenandoahHeap::heap();
 412     while (region != NULL) {
 413       if (heap->is_bitmap_slice_committed(region)) {
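        // Marks can only exist below top-at-mark-start (TAMS): objects allocated above TAMS are
        // implicitly live and never marked in the bitmap, so clearing up to TAMS is sufficient.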
 414         HeapWord* bottom = region->bottom();
 415         HeapWord* top = heap->next_top_at_mark_start(region->bottom());
 416         if (top > bottom) {
 417           heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 418         }
 419         assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
 420       }
 421       region = _regions.next();
 422     }
 423   }
 424 };
 425 
 426 void ShenandoahHeap::reset_next_mark_bitmap() {
 427   assert_gc_workers(_workers->active_workers());
 428 
 429   ShenandoahResetNextBitmapTask task(region_iterator());
 430   _workers->run_task(&task);
 431 }
 432 
 433 bool ShenandoahHeap::is_next_bitmap_clear() {
 434   for (size_t idx = 0; idx < _num_regions; idx++) {
 435     ShenandoahHeapRegion* r = get_region(idx);
 436     if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
 437       return false;
 438     }
 439   }
 440   return true;
 441 }
 442 
 443 bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 444   return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 445 }
 446 
 447 bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 448   return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 449 }
 450 
 451 void ShenandoahHeap::print_on(outputStream* st) const {
 452   st->print_cr("Shenandoah Heap");
 453   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 454                capacity() / K, committed() / K, used() / K);
 455   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
 456                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 457 
 458   st->print("Status: ");
 459   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 460   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 461   if (is_evacuation_in_progress())           st->print("evacuating, ");
 462   if (is_update_refs_in_progress())          st->print("updating refs, ");
 463   if (is_concurrent_partial_in_progress())   st->print("partial, ");
 464   if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
 465   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 466   if (is_full_gc_in_progress())              st->print("full gc, ");
 467   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 468 
 469   if (cancelled_concgc()) {
 470     st->print("conc gc cancelled");
 471   } else {
 472     st->print("not cancelled");
 473   }
 474   st->cr();
 475 
 476   st->print_cr("Reserved region:");
 477   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 478                p2i(reserved_region().start()),
 479                p2i(reserved_region().end()));
 480 
 481   if (UseShenandoahMatrix) {
 482     st->print_cr("Matrix:");
 483 
 484     ShenandoahConnectionMatrix* matrix = connection_matrix();
 485     if (matrix != NULL) {
 486       st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
 487       st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
 488       st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
 489     } else {
 490       st->print_cr(" No matrix.");
 491     }
 492   }
 493 
 494   if (Verbose) {
 495     print_heap_regions_on(st);
 496   }
 497 }
 498 
 499 class ShenandoahInitGCLABClosure : public ThreadClosure {
 500 public:
 501   void do_thread(Thread* thread) {
 502     if (thread != NULL && (thread->is_Java_thread() || thread->is_Worker_thread() ||
 503                            thread->is_ConcurrentGC_thread())) {
 504       thread->gclab().initialize(true);
 505     }
 506   }
 507 };
 508 
 509 void ShenandoahHeap::post_initialize() {
 510   CollectedHeap::post_initialize();
 511   if (UseTLAB) {
 512     MutexLocker ml(Threads_lock);
 513 
 514     ShenandoahInitGCLABClosure init_gclabs;
 515     Threads::threads_do(&init_gclabs);
 516     gc_threads_do(&init_gclabs);
 517 
    // gclabs cannot be initialized early during VM startup, because they cannot yet determine
    // their max_size. Instead, let the WorkGang initialize the gclab when a new worker is created.
 520     _workers->set_initialize_gclab();
 521   }
 522 
 523   _scm->initialize(_max_workers);
 524   _full_gc->initialize(_gc_timer);
 525 
 526   ref_processing_init();
 527 
 528   _shenandoah_policy->post_heap_initialize();
 529 }
 530 
 531 size_t ShenandoahHeap::used() const {
 532   return OrderAccess::load_acquire(&_used);
 533 }
 534 
 535 size_t ShenandoahHeap::committed() const {
 536   OrderAccess::acquire();
 537   return _committed;
 538 }
 539 
 540 void ShenandoahHeap::increase_committed(size_t bytes) {
 541   assert_heaplock_or_safepoint();
 542   _committed += bytes;
 543 }
 544 
 545 void ShenandoahHeap::decrease_committed(size_t bytes) {
 546   assert_heaplock_or_safepoint();
 547   _committed -= bytes;
 548 }
 549 
 550 void ShenandoahHeap::increase_used(size_t bytes) {
 551   Atomic::add(bytes, &_used);
 552 }
 553 
 554 void ShenandoahHeap::set_used(size_t bytes) {
 555   OrderAccess::release_store_fence(&_used, bytes);
 556 }
 557 
 558 void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease used by more than it currently is");
  Atomic::sub(bytes, &_used);
 561 }
 562 
 563 void ShenandoahHeap::increase_allocated(size_t bytes) {
 564   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 565 }
 566 
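// Accounting hook for successful allocations: wasted allocations are counted as allocated and
// reported to the pacer, but do not increase used().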
 567 void ShenandoahHeap::notify_alloc(size_t words, bool waste) {
 568   size_t bytes = words * HeapWordSize;
 569   if (!waste) {
 570     increase_used(bytes);
 571   }
 572   increase_allocated(bytes);
 573   if (ShenandoahPacing) {
 574     concurrent_thread()->pacing_notify_alloc(words);
 575     if (waste) {
 576       pacer()->claim_for_alloc(words, true);
 577     }
 578   }
 579 }
 580 
 581 size_t ShenandoahHeap::capacity() const {
 582   return num_regions() * ShenandoahHeapRegion::region_size_bytes();
 583 }
 584 
 585 bool ShenandoahHeap::is_maximal_no_gc() const {
 586   Unimplemented();
 587   return true;
 588 }
 589 
 590 size_t ShenandoahHeap::max_capacity() const {
 591   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 592 }
 593 
 594 size_t ShenandoahHeap::initial_capacity() const {
 595   return _initial_size;
 596 }
 597 
 598 bool ShenandoahHeap::is_in(const void* p) const {
 599   HeapWord* heap_base = (HeapWord*) base();
 600   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 601   return p >= heap_base && p < last_region_end;
 602 }
 603 
 604 bool ShenandoahHeap::is_scavengable(oop p) {
 605   return true;
 606 }
 607 
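// Uncommit regions that have been continuously empty since before the shrink_before deadline.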
 608 void ShenandoahHeap::handle_heap_shrinkage(double shrink_before) {
 609   if (!ShenandoahUncommit) {
 610     return;
 611   }
 612 
 613   ShenandoahHeapLocker locker(lock());
 614 
 615   size_t count = 0;
 616   for (size_t i = 0; i < num_regions(); i++) {
 617     ShenandoahHeapRegion* r = get_region(i);
 618     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 619       r->make_uncommitted();
 620       count++;
 621     }
 622   }
 623 
 624   if (count > 0) {
 625     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 626                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 627     _concurrent_gc_thread->notify_heap_changed();
 628   }
 629 }
 630 
 631 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // Retain the gclab and allocate the object in the shared space if
  // the amount of free space left in the gclab is too large to discard.
 634   if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
 635     thread->gclab().record_slow_allocation(size);
 636     return NULL;
 637   }
 638 
 639   // Discard gclab and allocate a new one.
 640   // To minimize fragmentation, the last GCLAB may be smaller than the rest.
 641   size_t new_gclab_size = thread->gclab().compute_size(size);
 642 
 643   thread->gclab().clear_before_allocation();
 644 
 645   if (new_gclab_size == 0) {
 646     return NULL;
 647   }
 648 
 649   // Allocate a new GCLAB...
 650   HeapWord* obj = allocate_new_gclab(new_gclab_size);
 651   if (obj == NULL) {
 652     return NULL;
 653   }
 654 
 655   if (ZeroTLAB) {
 656     // ..and clear it.
 657     Copy::zero_to_words(obj, new_gclab_size);
 658   } else {
 659     // ...and zap just allocated object.
 660 #ifdef ASSERT
 661     // Skip mangling the space corresponding to the object header to
 662     // ensure that the returned space is not considered parsable by
 663     // any concurrent GC thread.
 664     size_t hdr_size = oopDesc::header_size();
 665     Copy::fill_to_words(obj + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
 666 #endif // ASSERT
 667   }
 668   thread->gclab().fill(obj, obj + size, new_gclab_size);
 669   return obj;
 670 }
 671 
 672 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
 673 #ifdef ASSERT
 674   log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 675 #endif
 676   return allocate_new_lab(word_size, _alloc_tlab);
 677 }
 678 
 679 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
 680 #ifdef ASSERT
 681   log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 682 #endif
 683   return allocate_new_lab(word_size, _alloc_gclab);
 684 }
 685 
 686 HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
 687   HeapWord* result = allocate_memory(word_size, type);
 688 
 689   if (result != NULL) {
 690     assert(! in_collection_set(result), "Never allocate in collection set");
 691 
    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
 693 
 694   }
 695   return result;
 696 }
 697 
 698 ShenandoahHeap* ShenandoahHeap::heap() {
 699   CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
 701   assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
 702   return (ShenandoahHeap*) heap;
 703 }
 704 
 705 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
 706   CollectedHeap* heap = Universe::heap();
 707   return (ShenandoahHeap*) heap;
 708 }
 709 
 710 HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, AllocType type) {
 711   ShenandoahAllocTrace trace_alloc(word_size, type);
 712 
 713   bool in_new_region = false;
 714   HeapWord* result = NULL;
 715 
 716   if (type == _alloc_tlab || type == _alloc_shared) {
 717     if (ShenandoahPacing) {
 718       pacer()->pace_for_alloc(word_size);
 719     }
 720 
 721     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 722       result = allocate_memory_under_lock(word_size, type, in_new_region);
 723     }
 724 
 725     // Allocation failed, try full-GC, then retry allocation.
 726     //
    // It might happen that a thread requesting an allocation unblocks well after the full GC
    // has completed, only to fail its second allocation attempt because other threads have
    // already depleted the free storage. In this case, a better strategy would be to try the
    // full GC again.
    //
    // Lacking a way to detect progress from the "collect" call, we are left with blindly
    // retrying for some bounded number of times.
 734     // TODO: Poll if Full GC made enough progress to warrant retry.
 735     int tries = 0;
 736     while ((result == NULL) && (tries++ < ShenandoahAllocGCTries)) {
      log_debug(gc)("[" PTR_FORMAT "] Failed to allocate " SIZE_FORMAT " bytes, doing GC, try %d",
                    p2i(Thread::current()), word_size * HeapWordSize, tries);
 739       concurrent_thread()->handle_alloc_failure(word_size);
 740       result = allocate_memory_under_lock(word_size, type, in_new_region);
 741     }
 742   } else {
 743     assert(type == _alloc_gclab || type == _alloc_shared_gc, "Can only accept these types here");
 744     result = allocate_memory_under_lock(word_size, type, in_new_region);
 745     // Do not call handle_alloc_failure() here, because we cannot block.
 746     // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
 747   }
 748 
 749   if (in_new_region) {
 750     concurrent_thread()->notify_heap_changed();
 751   }
 752 
  log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
 754                                word_size, p2i(result), Thread::current()->osthread()->thread_id());
 755 
 756   if (result != NULL) {
 757     notify_alloc(word_size, false);
 758   }
 759 
 760   return result;
 761 }
 762 
 763 HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, AllocType type, bool& in_new_region) {
 764   ShenandoahHeapLocker locker(lock());
 765   return _free_set->allocate(word_size, type, in_new_region);
 766 }
 767 
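// Shared (out-of-LAB) allocation: an extra word is allocated in front of the object for the
// Brooks forwarding pointer, and the address right after it is returned as the object start.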
HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  HeapWord* filler = allocate_memory(size + BrooksPointer::word_size(), _alloc_shared);
  if (filler != NULL) {
    HeapWord* result = filler + BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));

    assert(! in_collection_set(result), "never allocate in targeted region");
    return result;
  } else {
    return NULL;
  }
}
 781 
 782 class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
 783 private:
 784   ShenandoahHeap* _heap;
 785   Thread* _thread;
 786 public:
 787   ShenandoahEvacuateUpdateRootsClosure() :
 788     _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 789   }
 790 
 791 private:
 792   template <class T>
 793   void do_oop_work(T* p) {
 794     assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
 795 
 796     T o = RawAccess<>::oop_load(p);
 797     if (! CompressedOops::is_null(o)) {
 798       oop obj = CompressedOops::decode_not_null(o);
 799       if (_heap->in_collection_set(obj)) {
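        // The object is in the collection set: evacuate it unless somebody already has,
        // then update the root to point at the to-space copy.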
 800         shenandoah_assert_marked_complete(p, obj);
 801         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 802         if (oopDesc::unsafe_equals(resolved, obj)) {
 803           bool evac;
 804           resolved = _heap->evacuate_object(obj, _thread, evac);
 805         }
 806         RawAccess<OOP_NOT_NULL>::oop_store(p, resolved);
 807       }
 808     }
 809   }
 810 
 811 public:
 812   void do_oop(oop* p) {
 813     do_oop_work(p);
 814   }
 815   void do_oop(narrowOop* p) {
 816     do_oop_work(p);
 817   }
 818 };
 819 
 820 class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
 821 private:
 822   ShenandoahHeap* _heap;
 823   Thread* _thread;
 824 public:
 825   ShenandoahEvacuateRootsClosure() :
 826           _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 827   }
 828 
 829 private:
 830   template <class T>
 831   void do_oop_work(T* p) {
 832     T o = RawAccess<>::oop_load(p);
 833     if (! CompressedOops::is_null(o)) {
 834       oop obj = CompressedOops::decode_not_null(o);
 835       if (_heap->in_collection_set(obj)) {
 836         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 837         if (oopDesc::unsafe_equals(resolved, obj)) {
 838           bool evac;
 839           _heap->evacuate_object(obj, _thread, evac);
 840         }
 841       }
 842     }
 843   }
 844 
 845 public:
 846   void do_oop(oop* p) {
 847     do_oop_work(p);
 848   }
 849   void do_oop(narrowOop* p) {
 850     do_oop_work(p);
 851   }
 852 };
 853 
 854 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
 855 private:
 856   ShenandoahHeap* const _heap;
 857   Thread* const _thread;
 858 public:
 859   ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 860     _heap(heap), _thread(Thread::current()) {}
 861 
 862   void do_object(oop p) {
 863     shenandoah_assert_marked_complete(NULL, p);
 864     if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
 865       bool evac;
 866       _heap->evacuate_object(p, _thread, evac);
 867     }
 868   }
 869 };
 870 
 871 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
 872 private:
 873   ShenandoahHeap* const _sh;
 874   ShenandoahCollectionSet* const _cs;
 875   ShenandoahSharedFlag _claimed_codecache;
 876 
 877 public:
  ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
                                   ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs)
  {}
 884 
 885   void work(uint worker_id) {
 886 
 887     ShenandoahEvacOOMScope oom_evac_scope;
 888     SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 889 
 890     // If concurrent code cache evac is enabled, evacuate it here.
 891     // Note we cannot update the roots here, because we risk non-atomic stores to the alive
 892     // nmethods. The update would be handled elsewhere.
 893     if (ShenandoahConcurrentEvacCodeRoots && _claimed_codecache.try_set()) {
 894       ShenandoahEvacuateRootsClosure cl;
 895       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
 896       CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
 897       CodeCache::blobs_do(&blobs);
 898     }
 899 
 900     ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
 901     ShenandoahHeapRegion* r;
 902     while ((r =_cs->claim_next()) != NULL) {
      log_develop_trace(gc, region)("Thread " INT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
 904                                     worker_id,
 905                                     r->region_number());
 906 
 907       assert(r->has_live(), "all-garbage regions are reclaimed early");
 908       _sh->marked_object_iterate(r, &cl);
 909 
 910       if (_sh->check_cancelled_concgc_and_yield()) {
 911         log_develop_trace(gc, region)("Cancelled concgc while evacuating region " SIZE_FORMAT, r->region_number());
 912         break;
 913       }
 914 
 915       if (ShenandoahPacing) {
 916         _sh->pacer()->report_evac(r->get_live_data_words());
 917       }
 918     }
 919   }
 920 };
 921 
 922 void ShenandoahHeap::trash_cset_regions() {
 923   ShenandoahHeapLocker locker(lock());
 924 
 925   ShenandoahCollectionSet* set = collection_set();
 926   ShenandoahHeapRegion* r;
 927   set->clear_current_index();
 928   while ((r = set->next()) != NULL) {
 929     r->make_trash();
 930   }
 931   collection_set()->clear();
 932 }
 933 
 934 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
 935   st->print_cr("Heap Regions:");
 936   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
 937   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
 938   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
 939   st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
 940 
 941   for (size_t i = 0; i < num_regions(); i++) {
 942     get_region(i)->print_on(st);
 943   }
 944 }
 945 
 946 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
 947   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
 948 
 949   oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
 950   size_t size = humongous_obj->size() + BrooksPointer::word_size();
 951   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
 952   size_t index = start->region_number() + required_regions - 1;
 953 
 954   assert(!start->has_live(), "liveness must be zero");
  log_trace(gc, humongous)("Reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
 956 
 957   for(size_t i = 0; i < required_regions; i++) {
 958     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
 959     // as it expects that every region belongs to a humongous region starting with a humongous start region.
 960     ShenandoahHeapRegion* region = get_region(index --);
 961 
 962     LogTarget(Trace, gc, humongous) lt;
 963     if (lt.is_enabled()) {
 964       ResourceMark rm;
 965       LogStream ls(lt);
 966       region->print_on(&ls);
 967     }
 968 
 969     assert(region->is_humongous(), "expect correct humongous start or continuation");
 970     assert(!in_collection_set(region), "Humongous region should not be in collection set");
 971 
 972     region->make_trash();
 973   }
 974 }
 975 
 976 #ifdef ASSERT
 977 class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
 978   bool heap_region_do(ShenandoahHeapRegion* r) {
 979     assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
 980     return false;
 981   }
 982 };
 983 #endif
 984 
 985 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
 986   log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());
 987 
 988   if (!cancelled_concgc()) {
 989     // Allocations might have happened before we STWed here, record peak:
 990     shenandoahPolicy()->record_peak_occupancy();
 991 
 992     make_tlabs_parsable(true);
 993 
 994     if (ShenandoahVerify) {
 995       verifier()->verify_after_concmark();
 996     }
 997 
 998     trash_cset_regions();
 999 
1000     // NOTE: This needs to be done during a stop the world pause, because
1001     // putting regions into the collection set concurrently with Java threads
1002     // will create a race. In particular, acmp could fail because when we
1003     // resolve the first operand, the containing region might not yet be in
1004     // the collection set, and thus return the original oop. When the 2nd
1005     // operand gets resolved, the region could be in the collection set
1006     // and the oop gets evacuated. If both operands have originally been
1007     // the same, we get false negatives.
1008 
1009     {
1010       ShenandoahHeapLocker locker(lock());
1011       _collection_set->clear();
1012       _free_set->clear();
1013 
1014 #ifdef ASSERT
1015       ShenandoahCheckCollectionSetClosure ccsc;
1016       heap_region_iterate(&ccsc);
1017 #endif
1018 
1019       _shenandoah_policy->choose_collection_set(_collection_set);
1020 
1021       _free_set->rebuild();
1022     }
1023 
1024     Universe::update_heap_info_at_gc();
1025 
1026     if (ShenandoahVerify) {
1027       verifier()->verify_before_evacuation();
1028     }
1029   }
1030 }
1031 
1032 
1033 class ShenandoahRetireTLABClosure : public ThreadClosure {
1034 private:
1035   bool _retire;
1036 
1037 public:
1038   ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}
1039 
1040   void do_thread(Thread* thread) {
1041     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1042     thread->gclab().make_parsable(_retire);
1043   }
1044 };
1045 
1046 void ShenandoahHeap::make_tlabs_parsable(bool retire_tlabs) {
1047   if (UseTLAB) {
1048     CollectedHeap::ensure_parsability(retire_tlabs);
1049     ShenandoahRetireTLABClosure cl(retire_tlabs);
1050     for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1051       cl.do_thread(t);
1052     }
1053     gc_threads_do(&cl);
1054   }
1055 }
1056 
1057 
1058 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1059   ShenandoahRootEvacuator* _rp;
1060 public:
1061 
1062   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1063     AbstractGangTask("Shenandoah evacuate and update roots"),
1064     _rp(rp)
1065   {
1066     // Nothing else to do.
1067   }
1068 
1069   void work(uint worker_id) {
1070     ShenandoahEvacOOMScope oom_evac_scope;
1071     ShenandoahEvacuateUpdateRootsClosure cl;
1072 
1073     if (ShenandoahConcurrentEvacCodeRoots) {
1074       _rp->process_evacuate_roots(&cl, NULL, worker_id);
1075     } else {
1076       MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1077       _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1078     }
1079   }
1080 };
1081 
1082 class ShenandoahFixRootsTask : public AbstractGangTask {
1083   ShenandoahRootEvacuator* _rp;
1084 public:
1085 
1086   ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
1087     AbstractGangTask("Shenandoah update roots"),
1088     _rp(rp)
1089   {
1090     // Nothing else to do.
1091   }
1092 
1093   void work(uint worker_id) {
1094     ShenandoahEvacOOMScope oom_evac_scope;
1095     ShenandoahUpdateRefsClosure cl;
1096     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1097 
1098     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1099   }
1100 };
1101 
1102 void ShenandoahHeap::evacuate_and_update_roots() {
1103 
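  // Derived pointers cached by the compilers must be re-derived once their base oops move, so
  // the DerivedPointerTable is cleared before the root scan and updated afterwards.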
1104 #if defined(COMPILER2) || INCLUDE_JVMCI
1105   DerivedPointerTable::clear();
1106 #endif
1107   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1108 
1109   {
1110     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1111     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1112     workers()->run_task(&roots_task);
1113   }
1114 
1115 #if defined(COMPILER2) || INCLUDE_JVMCI
1116   DerivedPointerTable::update_pointers();
1117 #endif
1118   if (cancelled_concgc()) {
1119     fixup_roots();
1120   }
1121 }
1122 
1123 void ShenandoahHeap::fixup_roots() {
    assert(cancelled_concgc(), "Only after concurrent cycle failed");

    // If initial evacuation has been cancelled, we need to update all references
    // after all workers have finished. Otherwise we might run into the following problem:
    // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
    // pointer to object X in some root; GC thread 2 then evacuates the same object X to
    // to-space, which leaves a truly dangling from-space reference in that first root oop*.
    // This must not happen.
    // Note: clear() and update_pointers() must always be called in pairs, and this pair
    // cannot nest within the clear()/update_pointers() pair above.
1133 #if defined(COMPILER2) || INCLUDE_JVMCI
1134     DerivedPointerTable::clear();
1135 #endif
1136     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1137     ShenandoahFixRootsTask update_roots_task(&rp);
1138     workers()->run_task(&update_roots_task);
1139 #if defined(COMPILER2) || INCLUDE_JVMCI
1140     DerivedPointerTable::update_pointers();
1141 #endif
1142 }
1143 
1144 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1145   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1146 
1147   CodeBlobToOopClosure blobsCl(cl, false);
1148   CLDToOopClosure cldCl(cl);
1149 
1150   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1151   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
1152 }
1153 
1154 bool ShenandoahHeap::supports_tlab_allocation() const {
1155   return true;
1156 }
1157 
1158 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1159   return MIN2(_free_set->unsafe_peek_free(), max_tlab_size());
1160 }
1161 
1162 size_t ShenandoahHeap::max_tlab_size() const {
1163   return ShenandoahHeapRegion::max_tlab_size_bytes();
1164 }
1165 
1166 class ShenandoahResizeGCLABClosure : public ThreadClosure {
1167 public:
1168   void do_thread(Thread* thread) {
1169     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1170     thread->gclab().resize();
1171   }
1172 };
1173 
1174 void ShenandoahHeap::resize_all_tlabs() {
1175   CollectedHeap::resize_all_tlabs();
1176 
1177   ShenandoahResizeGCLABClosure cl;
1178   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1179     cl.do_thread(t);
1180   }
1181   gc_threads_do(&cl);
1182 }
1183 
1184 class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
1185 public:
1186   void do_thread(Thread* thread) {
1187     assert(thread->gclab().is_initialized(), "GCLAB should be initialized for %s", thread->name());
1188     thread->gclab().accumulate_statistics();
1189     thread->gclab().initialize_statistics();
1190   }
1191 };
1192 
1193 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1194   ShenandoahAccumulateStatisticsGCLABClosure cl;
1195   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1196     cl.do_thread(t);
1197   }
1198   gc_threads_do(&cl);
1199 }
1200 
1201 bool  ShenandoahHeap::can_elide_tlab_store_barriers() const {
1202   return true;
1203 }
1204 
1205 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1206   // Overridden to do nothing.
1207   return new_obj;
1208 }
1209 
1210 bool  ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1211   return true;
1212 }
1213 
1214 bool ShenandoahHeap::card_mark_must_follow_store() const {
1215   return false;
1216 }
1217 
1218 void ShenandoahHeap::collect(GCCause::Cause cause) {
1219   _concurrent_gc_thread->handle_explicit_gc(cause);
1220 }
1221 
1222 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1223   //assert(false, "Shouldn't need to do full collections");
1224 }
1225 
1226 AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
1227   Unimplemented();
1228   return NULL;
1229 
1230 }
1231 
1232 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1233   return _shenandoah_policy;
1234 }
1235 
1236 
1237 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1238   Space* sp = heap_region_containing(addr);
1239   if (sp != NULL) {
1240     return sp->block_start(addr);
1241   }
1242   return NULL;
1243 }
1244 
1245 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1246   Space* sp = heap_region_containing(addr);
1247   assert(sp != NULL, "block_size of address outside of heap");
1248   return sp->block_size(addr);
1249 }
1250 
1251 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1252   Space* sp = heap_region_containing(addr);
1253   return sp->block_is_obj(addr);
1254 }
1255 
1256 jlong ShenandoahHeap::millis_since_last_gc() {
1257   return 0;
1258 }
1259 
1260 void ShenandoahHeap::prepare_for_verify() {
1261   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1262     make_tlabs_parsable(false);
1263   }
1264 }
1265 
1266 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1267   workers()->print_worker_threads_on(st);
1268   if (ShenandoahStringDedup::is_enabled()) {
1269     ShenandoahStringDedup::print_worker_threads_on(st);
1270   }
1271 }
1272 
1273 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1274   workers()->threads_do(tcl);
1275   if (ShenandoahStringDedup::is_enabled()) {
1276     ShenandoahStringDedup::threads_do(tcl);
1277   }
1278 }
1279 
1280 void ShenandoahHeap::print_tracing_info() const {
1281   LogTarget(Info, gc, stats) lt;
1282   if (lt.is_enabled()) {
1283     ResourceMark rm;
1284     LogStream ls(lt);
1285 
1286     phase_timings()->print_on(&ls);
1287 
1288     ls.cr();
1289     ls.cr();
1290 
1291     shenandoahPolicy()->print_gc_stats(&ls);
1292 
1293     ls.cr();
1294     ls.cr();
1295 
1296     if (ShenandoahPacing) {
1297       pacer()->print_on(&ls);
1298     }
1299 
1300     ls.cr();
1301     ls.cr();
1302 
1303     if (ShenandoahAllocationTrace) {
1304       assert(alloc_tracker() != NULL, "Must be");
1305       alloc_tracker()->print_on(&ls);
1306     } else {
1307       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1308     }
1309   }
1310 }
1311 
1312 void ShenandoahHeap::verify(VerifyOption vo) {
1313   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1314     if (ShenandoahVerify) {
1315       verifier()->verify_generic(vo);
1316     } else {
1317       // TODO: Consider allocating verification bitmaps on demand,
1318       // and turn this on unconditionally.
1319     }
1320   }
1321 }
1322 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1323   return _free_set->capacity();
1324 }
1325 
1326 class ObjectIterateScanRootClosure : public ExtendedOopClosure {
1327 private:
1328   MarkBitMap* _bitmap;
1329   Stack<oop,mtGC>* _oop_stack;
1330 
1331   template <class T>
1332   void do_oop_work(T* p) {
1333     T o = RawAccess<>::oop_load(p);
1334     if (!CompressedOops::is_null(o)) {
1335       oop obj = CompressedOops::decode_not_null(o);
1336       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1337       assert(oopDesc::is_oop(obj), "must be a valid oop");
1338       if (!_bitmap->isMarked((HeapWord*) obj)) {
1339         _bitmap->mark((HeapWord*) obj);
1340         _oop_stack->push(obj);
1341       }
1342     }
1343   }
1344 public:
1345   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1346     _bitmap(bitmap), _oop_stack(oop_stack) {}
1347   void do_oop(oop* p)       { do_oop_work(p); }
1348   void do_oop(narrowOop* p) { do_oop_work(p); }
1349 };
1350 
1351 /*
1352  * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
1354  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1355  * control, we call SH::make_tlabs_parsable().
1356  */
1357 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1358   // No-op.
1359 }
1360 
1361 /*
1362  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1363  *
1364  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1365  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1366  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1367  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1368  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1369  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1370  * wiped the bitmap in preparation for next marking).
1371  *
1372  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1373  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1374  * is allowed to report dead objects, but is not required to do so.
1375  */
1376 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1377   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1378   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1379     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1380     return;
1381   }
1382 
1383   Stack<oop,mtGC> oop_stack;
1384 
1385   // First, we process all GC roots. This populates the work stack with initial objects.
1386   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1387   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1388   CLDToOopClosure clds(&oops, false);
1389   CodeBlobToOopClosure blobs(&oops, false);
1390   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1391 
1392   // Work through the oop stack to traverse heap.
1393   while (! oop_stack.is_empty()) {
1394     oop obj = oop_stack.pop();
1395     assert(oopDesc::is_oop(obj), "must be a valid oop");
1396     cl->do_object(obj);
1397     obj->oop_iterate(&oops);
1398   }
1399 
1400   assert(oop_stack.is_empty(), "should be empty");
1401 
1402   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1403     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1404   }
1405 }
1406 
1407 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1408   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1409   object_iterate(cl);
1410 }
1411 
// Apply blk->heap_region_do() on regions in address order,
// terminating the iteration early if heap_region_do() returns true.
1414 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1415   for (size_t i = 0; i < num_regions(); i++) {
1416     ShenandoahHeapRegion* current  = get_region(i);
1417     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1418       continue;
1419     }
1420     if (skip_cset_regions && in_collection_set(current)) {
1421       continue;
1422     }
1423     if (blk->heap_region_do(current)) {
1424       return;
1425     }
1426   }
1427 }
1428 
1429 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1430 private:
1431   ShenandoahHeap* sh;
1432 public:
1433   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1434 
1435   bool heap_region_do(ShenandoahHeapRegion* r) {
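    // Reset per-region liveness and move TAMS up to the current top, so that objects
    // allocated from here on are implicitly live for the upcoming mark.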
1436     r->clear_live_data();
1437     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1438     return false;
1439   }
1440 };
1441 
1442 void ShenandoahHeap::op_init_mark() {
1443   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1444 
1445   assert(is_next_bitmap_clear(), "need clear marking bitmap");
1446 
1447   if (ShenandoahVerify) {
1448     verifier()->verify_before_concmark();
1449   }
1450 
1451   {
1452     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1453     accumulate_statistics_all_tlabs();
1454   }
1455 
1456   set_concurrent_mark_in_progress(true);
1457   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1458   if (UseTLAB) {
1459     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1460     make_tlabs_parsable(true);
1461   }
1462 
1463   {
1464     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1465     ShenandoahClearLivenessClosure clc(this);
1466     heap_region_iterate(&clc);
1467   }
1468 
1469   // Make above changes visible to worker threads
1470   OrderAccess::fence();
1471 
1472   concurrentMark()->init_mark_roots();
1473 
1474   if (UseTLAB) {
1475     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1476     resize_all_tlabs();
1477   }
1478 
1479   if (ShenandoahPacing) {
1480     pacer()->setup_for_mark();
1481   }
1482 }
1483 
1484 void ShenandoahHeap::op_mark() {
1485   concurrentMark()->mark_from_roots();
1486 
1487   // Allocations happen during concurrent mark, record peak after the phase:
1488   shenandoahPolicy()->record_peak_occupancy();
1489 }
1490 
1491 void ShenandoahHeap::op_final_mark() {
1492   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1493 
1494   // It is critical that we evacuate roots right after finishing marking,
1495   // so that we don't get unmarked objects in the roots.
1497 
1498   if (! cancelled_concgc()) {
1499     concurrentMark()->finish_mark_from_roots();
1500     stop_concurrent_marking();
1501 
1502     {
1503       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1504       prepare_for_concurrent_evacuation();
1505     }
1506 
1507     // If collection set has candidates, start evacuation.
1508     // Otherwise, bypass the rest of the cycle.
1509     if (!collection_set()->is_empty()) {
1510       set_evacuation_in_progress(true);
1511       // From here on, we need to update references.
1512       set_has_forwarded_objects(true);
1513 
1514       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1515       evacuate_and_update_roots();
1516     }
1517 
1518     if (ShenandoahPacing) {
1519       pacer()->setup_for_evac();
1520     }
1521   } else {
1522     concurrentMark()->cancel();
1523     stop_concurrent_marking();
1524 
1525     if (process_references()) {
1526       // Abandon reference processing right away: pre-cleaning must have failed.
1527       ReferenceProcessor *rp = ref_processor();
1528       rp->disable_discovery();
1529       rp->abandon_partial_discovery();
1530       rp->verify_no_references_recorded();
1531     }
1532   }
1533 }
1534 
1535 void ShenandoahHeap::op_final_evac() {
1536   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1537 
1538   set_evacuation_in_progress(false);
1539   if (ShenandoahVerify) {
1540     verifier()->verify_after_evacuation();
1541   }
1542 }
1543 
1544 void ShenandoahHeap::op_evac() {
1546   LogTarget(Trace, gc, region) lt_region;
1547   LogTarget(Trace, gc, cset) lt_cset;
1548 
1549   if (lt_region.is_enabled()) {
1550     ResourceMark rm;
1551     LogStream ls(lt_region);
1552     ls.print_cr("All available regions:");
1553     print_heap_regions_on(&ls);
1554   }
1555 
1556   if (lt_cset.is_enabled()) {
1557     ResourceMark rm;
1558     LogStream ls(lt_cset);
1559     ls.print_cr("Collection set (" SIZE_FORMAT " regions):", _collection_set->count());
1560     _collection_set->print_on(&ls);
1561 
1562     ls.print_cr("Free set:");
1563     _free_set->print_on(&ls);
1564   }
1565 
1566   ShenandoahParallelEvacuationTask task(this, _collection_set);
1567   workers()->run_task(&task);
1568 
1569   if (lt_cset.is_enabled()) {
1570     ResourceMark rm;
1571     LogStream ls(lt_cset);
1572     ls.print_cr("After evacuation collection set (" SIZE_FORMAT " regions):",
1573                 _collection_set->count());
1574     _collection_set->print_on(&ls);
1575 
1576     ls.print_cr("After evacuation free set:");
1577     _free_set->print_on(&ls);
1578   }
1579 
1580   if (lt_region.is_enabled()) {
1581     ResourceMark rm;
1582     LogStream ls(lt_region);
1583     ls.print_cr("All regions after evacuation:");
1584     print_heap_regions_on(&ls);
1585   }
1586 
1587   // Allocations happen during evacuation, record peak after the phase:
1588   shenandoahPolicy()->record_peak_occupancy();
1589 }
1590 
1591 void ShenandoahHeap::op_updaterefs() {
1592   update_heap_references(true);
1593 
1594   // Allocations happen during update-refs, record peak after the phase:
1595   shenandoahPolicy()->record_peak_occupancy();
1596 }
1597 
1598 void ShenandoahHeap::op_cleanup() {
1599   ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
1600   free_set()->recycle_trash();
1601 
1602   // Allocations happen during cleanup, record peak after the phase:
1603   shenandoahPolicy()->record_peak_occupancy();
1604 }
1605 
1606 void ShenandoahHeap::op_cleanup_bitmaps() {
1607   op_cleanup();
1608 
1609   ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1610   reset_next_mark_bitmap();
1611 
1612   // Allocations happen during bitmap cleanup, record peak after the phase:
1613   shenandoahPolicy()->record_peak_occupancy();
1614 }
1615 
1616 void ShenandoahHeap::op_preclean() {
1617   concurrentMark()->preclean_weak_refs();
1618 
1619   // Allocations happen during concurrent preclean, record peak after the phase:
1620   shenandoahPolicy()->record_peak_occupancy();
1621 }
1622 
1623 void ShenandoahHeap::op_init_partial() {
1624   partial_gc()->init_partial_collection();
1625 }
1626 
1627 void ShenandoahHeap::op_partial() {
1628   partial_gc()->concurrent_partial_collection();
1629 }
1630 
1631 void ShenandoahHeap::op_final_partial() {
1632   partial_gc()->final_partial_collection();
1633 }
1634 
1635 void ShenandoahHeap::op_init_traversal() {
1636   traversal_gc()->init_traversal_collection();
1637 }
1638 
1639 void ShenandoahHeap::op_traversal() {
1640   traversal_gc()->concurrent_traversal_collection();
1641 }
1642 
1643 void ShenandoahHeap::op_final_traversal() {
1644   traversal_gc()->final_traversal_collection();
1645 }
1646 
1647 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1648   full_gc()->do_it(cause);
1649 }
1650 
1651 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1652   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1653   // GC failure via the cancelled_concgc() flag. So, if we detect a failure after
1654   // some phase, we have to upgrade the Degenerated GC to a Full GC.
1655 
1656   clear_cancelled_concgc();
1657 
1658   size_t used_before = used();
1659 
1660   switch (point) {
1661     case _degenerated_partial:
1662     case _degenerated_evac:
1663       // Not possible to degenerate from here, upgrade to Full GC right away.
1664       cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1665       op_degenerated_fail();
1666       return;
1667 
1668     // The cases below form a Duff's-device-like construct: they describe the actual GC
1669     // cycle, but enter it at different points, depending on which concurrent phase
1670     // degenerated.
1671 
1672     case _degenerated_traversal:
1673       {
1674         ShenandoahHeapLocker locker(lock());
1675         collection_set()->clear_current_index();
1676         for (size_t i = 0; i < collection_set()->count(); i++) {
1677           ShenandoahHeapRegion* r = collection_set()->next();
1678           r->make_regular_bypass();
1679         }
1680         collection_set()->clear();
1681       }
1682       op_final_traversal();
1683       op_cleanup_bitmaps();
1684       return;
1685 
1686     case _degenerated_outside_cycle:
1687       if (shenandoahPolicy()->can_do_traversal_gc()) {
1688         // Not possible to degenerate from here, upgrade to Full GC right away.
1689         cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1690         op_degenerated_fail();
1691         return;
1692       }
1693       op_init_mark();
1694       if (cancelled_concgc()) {
1695         op_degenerated_fail();
1696         return;
1697       }
1698 
1699     case _degenerated_mark:
1700       op_final_mark();
1701       if (cancelled_concgc()) {
1702         op_degenerated_fail();
1703         return;
1704       }
1705 
1706       op_cleanup();
1707 
1708       // If the heuristics decided we should do the cycle, this flag is set and we can
1709       // do evacuation. Otherwise, this is the shortcut cycle.
1710       if (is_evacuation_in_progress()) {
1711         op_evac();
1712         if (cancelled_concgc()) {
1713           op_degenerated_fail();
1714           return;
1715         }
1716       }
1717 
1718       // If the heuristics decided we should do the cycle, this flag is set and we need
1719       // to do update-refs. Otherwise, this is the shortcut cycle.
1720       if (has_forwarded_objects()) {
1721         op_init_updaterefs();
1722         if (cancelled_concgc()) {
1723           op_degenerated_fail();
1724           return;
1725         }
1726       }
1727 
1728     case _degenerated_updaterefs:
1729       if (has_forwarded_objects()) {
1730         op_final_updaterefs();
1731         if (cancelled_concgc()) {
1732           op_degenerated_fail();
1733           return;
1734         }
1735       }
1736 
1737       op_cleanup_bitmaps();
1738       break;
1739 
1740     default:
1741       ShouldNotReachHere();
1742   }
1743 
1744   if (ShenandoahVerify) {
1745     verifier()->verify_after_degenerated();
1746   }
1747 
1748   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1749   // because that probably means the heap is overloaded and/or fragmented.
1750   size_t used_after = used();
1751   size_t difference = (used_before > used_after) ? used_before - used_after : 0;
1752   if (difference < ShenandoahHeapRegion::region_size_words()) {
1753     cancel_concgc(GCCause::_shenandoah_upgrade_to_full_gc);
1754     op_degenerated_futile();
1755   }
1756 }
1757 
1758 void ShenandoahHeap::op_degenerated_fail() {
1759   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1760   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1761   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1762 }
1763 
1764 void ShenandoahHeap::op_degenerated_futile() {
1765   log_info(gc)("Degenerated GC did not reclaim enough, upgrading to Full GC");
1766   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1767   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1768 }
1769 
1770 void ShenandoahHeap::swap_mark_bitmaps() {
1771   // Swap bitmaps.
1772   MarkBitMap* tmp1 = _complete_mark_bit_map;
1773   _complete_mark_bit_map = _next_mark_bit_map;
1774   _next_mark_bit_map = tmp1;
1775 
1776   // Swap top-at-mark-start pointers
1777   HeapWord** tmp2 = _complete_top_at_mark_starts;
1778   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1779   _next_top_at_mark_starts = tmp2;
1780 
1781   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1782   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1783   _next_top_at_mark_starts_base = tmp3;
1784 }
1785 
1786 
1787 void ShenandoahHeap::stop_concurrent_marking() {
1788   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
1789   if (! cancelled_concgc()) {
1790     // Marking completed normally: clear the flag and swap in the freshly completed bitmap.
1791     // (If marking had been cancelled, we would keep the flag so refs still get updated later.)
1792     set_has_forwarded_objects(false);
1793     swap_mark_bitmaps();
1794   }
1795   set_concurrent_mark_in_progress(false);
1796 
1797   LogTarget(Trace, gc, region) lt;
1798   if (lt.is_enabled()) {
1799     ResourceMark rm;
1800     LogStream ls(lt);
1801     ls.print_cr("Regions at stopping the concurrent mark:");
1802     print_heap_regions_on(&ls);
1803   }
1804 }
1805 
1806 void ShenandoahHeap::set_gc_state_all_threads(char state) {
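  // Publish the packed GC state to every Java thread's thread-local copy, which is the
  // copy the barriers consult on their fast paths.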
1807   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1808     ShenandoahThreadLocalData::set_gc_state(t, state);
1809   }
1810 }
1811 
1812 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1813   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1814   _gc_state.set_cond(mask, value);
1815   set_gc_state_all_threads(_gc_state.raw_value());
1816 }
1817 
1818 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1819   set_gc_state_mask(MARKING, in_progress);
1820   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1821 }
1822 
1823 void ShenandoahHeap::set_concurrent_partial_in_progress(bool in_progress) {
1824   set_gc_state_mask(PARTIAL | HAS_FORWARDED, in_progress);
1826   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1827 }
1828 
1829 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
1830   set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
1831   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1832 }
1833 
1834 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1835   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1836   set_gc_state_mask(EVACUATION, in_progress);
1837 }
1838 
1839 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1840   // Initialize Brooks pointer for the next object
1841   HeapWord* result = obj + BrooksPointer::word_size();
1842   BrooksPointer::initialize(oop(result));
1843   return result;
1844 }
1845 
1846 uint ShenandoahHeap::oop_extra_words() {
1847   return BrooksPointer::word_size();
1848 }
1849 
1850 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1851   _heap(ShenandoahHeap::heap_no_check()) {
1852 }
1853 
1854 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1855   _heap(ShenandoahHeap::heap_no_check()) {
1856 }
1857 
1858 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1859   if (CompressedOops::is_null(obj)) {
1860     return false;
1861   }
1862   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1863   shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_mark_in_progress() || _heap->is_concurrent_traversal_in_progress());
1864   return _heap->is_marked_next(obj);
1865 }
1866 
1867 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1868   if (CompressedOops::is_null(obj)) {
1869     return false;
1870   }
1871   shenandoah_assert_not_forwarded(NULL, obj);
1872   return _heap->is_marked_next(obj);
1873 }
1874 
1875 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
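  // While forwarded objects may exist, liveness queries have to resolve the forwardee
  // first; otherwise the plain closure is sufficient.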
1876   return has_forwarded_objects() ?
1877          (BoolObjectClosure*) &_forwarded_is_alive :
1878          (BoolObjectClosure*) &_is_alive;
1879 }
1880 
1881 void ShenandoahHeap::ref_processing_init() {
1882   MemRegion mr = reserved_region();
1883 
1884   _forwarded_is_alive.init(this);
1885   _is_alive.init(this);
1886   assert(_max_workers > 0, "Sanity");
1887 
1888   _ref_processor =
1889     new ReferenceProcessor(mr,    // span
1890                            ParallelRefProcEnabled,  // MT processing
1891                            _max_workers,            // Degree of MT processing
1892                            true,                    // MT discovery
1893                            _max_workers,            // Degree of MT discovery
1894                            false,                   // Reference discovery is not atomic
1895                            NULL);                   // No closure, should be installed before use
1896 
1897   shenandoah_assert_rp_isalive_not_installed();
1898 }
1899 
1900 
1901 GCTracer* ShenandoahHeap::tracer() {
1902   return shenandoahPolicy()->tracer();
1903 }
1904 
1905 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1906   return _free_set->used();
1907 }
1908 
1909 void ShenandoahHeap::cancel_concgc(GCCause::Cause cause) {
1910   if (try_cancel_concgc()) {
1911     FormatBuffer<> msg("Cancelling concurrent GC: %s", GCCause::to_string(cause));
1912     log_info(gc)("%s", msg.buffer());
1913     Events::log(Thread::current(), "%s", msg.buffer());
1914   }
1915 }
1916 
1917 uint ShenandoahHeap::max_workers() {
1918   return _max_workers;
1919 }
1920 
1921 void ShenandoahHeap::stop() {
1922   // The shutdown sequence should be able to complete even while a GC cycle is running.
1923 
1924   // Step 0. Notify policy to disable event recording.
1925   _shenandoah_policy->record_shutdown();
1926 
1927   // Step 1. Notify control thread that we are in shutdown.
1928   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1929   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1930   _concurrent_gc_thread->prepare_for_graceful_shutdown();
1931 
1932   // Step 2. Notify GC workers that we are cancelling GC.
1933   cancel_concgc(GCCause::_shenandoah_stop_vm);
1934 
1935   // Step 3. Wait until GC worker exits normally.
1936   _concurrent_gc_thread->stop();
1937 
1938   // Step 4. Stop String Dedup thread if it is active
1939   if (ShenandoahStringDedup::is_enabled()) {
1940     ShenandoahStringDedup::stop();
1941   }
1942 }
1943 
1944 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1945   ShenandoahPhaseTimings::Phase phase_root =
1946           full_gc ?
1947           ShenandoahPhaseTimings::full_gc_purge :
1948           ShenandoahPhaseTimings::purge;
1949 
1950   ShenandoahPhaseTimings::Phase phase_unload =
1951           full_gc ?
1952           ShenandoahPhaseTimings::full_gc_purge_class_unload :
1953           ShenandoahPhaseTimings::purge_class_unload;
1954 
1955   ShenandoahPhaseTimings::Phase phase_cldg =
1956           full_gc ?
1957           ShenandoahPhaseTimings::full_gc_purge_cldg :
1958           ShenandoahPhaseTimings::purge_cldg;
1959 
1960   ShenandoahPhaseTimings::Phase phase_par =
1961           full_gc ?
1962           ShenandoahPhaseTimings::full_gc_purge_par :
1963           ShenandoahPhaseTimings::purge_par;
1964 
1965   ShenandoahPhaseTimings::Phase phase_par_classes =
1966           full_gc ?
1967           ShenandoahPhaseTimings::full_gc_purge_par_classes :
1968           ShenandoahPhaseTimings::purge_par_classes;
1969 
1970   ShenandoahPhaseTimings::Phase phase_par_codecache =
1971           full_gc ?
1972           ShenandoahPhaseTimings::full_gc_purge_par_codecache :
1973           ShenandoahPhaseTimings::purge_par_codecache;
1974 
1975   ShenandoahPhaseTimings::Phase phase_par_rmt =
1976           full_gc ?
1977           ShenandoahPhaseTimings::full_gc_purge_par_rmt :
1978           ShenandoahPhaseTimings::purge_par_rmt;
1979 
1980   ShenandoahPhaseTimings::Phase phase_par_symbstring =
1981           full_gc ?
1982           ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
1983           ShenandoahPhaseTimings::purge_par_symbstring;
1984 
1985   ShenandoahPhaseTimings::Phase phase_par_sync =
1986           full_gc ?
1987           ShenandoahPhaseTimings::full_gc_purge_par_sync :
1988           ShenandoahPhaseTimings::purge_par_sync;
1989 
1990   ShenandoahGCPhase root_phase(phase_root);
1991 
1992   BoolObjectClosure* is_alive = is_alive_closure();
1993 
1994   bool purged_class;
1995 
1996   // Unload classes and purge SystemDictionary.
1997   {
1998     ShenandoahGCPhase phase(phase_unload);
1999     purged_class = SystemDictionary::do_unloading(is_alive,
2000                                                   gc_timer(),
2001                                                   false /* defer cleaning */);
2002   }
2003 
2004   {
2005     ShenandoahGCPhase phase(phase_par);
2006     uint active = _workers->active_workers();
2007     ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
2008     _workers->run_task(&unlink_task);
2009 
2010     ShenandoahPhaseTimings* p = phase_timings();
2011     ParallelCleaningTimes times = unlink_task.times();
2012 
2013     // "times" reports summed worker times, while the phase timings record wall time. Divide
2014     // the totals by the number of active workers to get per-worker averages that add up to wall time.
2015     p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
2016     p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
2017     p->record_phase_time(phase_par_rmt,        times.rmt_work_us() / active);
2018     p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
2019     p->record_phase_time(phase_par_sync,       times.sync_us() / active);
2020   }
2021 
2022   if (ShenandoahStringDedup::is_enabled()) {
2023     ShenandoahPhaseTimings::Phase phase_par_string_dedup =
2024             full_gc ?
2025             ShenandoahPhaseTimings::full_gc_purge_par_string_dedup :
2026             ShenandoahPhaseTimings::purge_par_string_dedup;
2027     ShenandoahGCPhase phase(phase_par_string_dedup);
2028     ShenandoahStringDedup::parallel_cleanup();
2029   }
2030 
2031 
2032   {
2033     ShenandoahGCPhase phase(phase_cldg);
2034     ClassLoaderDataGraph::purge();
2035   }
2036 }
2037 
2038 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2039   set_gc_state_mask(HAS_FORWARDED, cond);
2040 }
2041 
2042 void ShenandoahHeap::set_process_references(bool pr) {
2043   _process_references.set_cond(pr);
2044 }
2045 
2046 void ShenandoahHeap::set_unload_classes(bool uc) {
2047   _unload_classes.set_cond(uc);
2048 }
2049 
2050 bool ShenandoahHeap::process_references() const {
2051   return _process_references.is_set();
2052 }
2053 
2054 bool ShenandoahHeap::unload_classes() const {
2055   return _unload_classes.is_set();
2056 }
2057 
2058 // FIXME: This should live in ShenandoahHeapRegionSet.
2059 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
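  // Returns the next non-humongous region after r, in address order.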
2060   size_t region_idx = r->region_number() + 1;
2061   ShenandoahHeapRegion* next = get_region(region_idx);
2062   guarantee(next->region_number() == region_idx, "region number must match");
2063   while (next->is_humongous()) {
2064     region_idx = next->region_number() + 1;
2065     next = get_region(region_idx);
2066     guarantee(next->region_number() == region_idx, "region number must match");
2067   }
2068   return next;
2069 }
2070 
2071 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2072   return _monitoring_support;
2073 }
2074 
2075 MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
2076   return _complete_mark_bit_map;
2077 }
2078 
2079 MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
2080   return _next_mark_bit_map;
2081 }
2082 
2083 address ShenandoahHeap::in_cset_fast_test_addr() {
2084   ShenandoahHeap* heap = ShenandoahHeap::heap();
2085   assert(heap->collection_set() != NULL, "Sanity");
2086   return (address) heap->collection_set()->biased_map_address();
2087 }
2088 
2089 address ShenandoahHeap::cancelled_concgc_addr() {
2090   return (address) ShenandoahHeap::heap()->_cancelled_concgc.addr_of();
2091 }
2092 
2093 address ShenandoahHeap::gc_state_addr() {
2094   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2095 }
2096 
2097 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2098   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2099 }
2100 
2101 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2102   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2103 }
2104 
2105 ShenandoahPacer* ShenandoahHeap::pacer() const {
2106   assert (_pacer != NULL, "sanity");
2107   return _pacer;
2108 }
2109 
2110 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2111   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2112   _next_top_at_mark_starts[index] = addr;
2113 }
2114 
2115 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2116   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2117   return _next_top_at_mark_starts[index];
2118 }
2119 
2120 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2121   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2122   _complete_top_at_mark_starts[index] = addr;
2123 }
2124 
2125 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2126   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2127   return _complete_top_at_mark_starts[index];
2128 }
2129 
2130 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2131   _degenerated_gc_in_progress.set_cond(in_progress);
2132 }
2133 
2134 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2135   _full_gc_in_progress.set_cond(in_progress);
2136 }
2137 
2138 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2139   assert (is_full_gc_in_progress(), "should be");
2140   _full_gc_move_in_progress.set_cond(in_progress);
2141 }
2142 
2143 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2144   set_gc_state_mask(UPDATEREFS, in_progress);
2145 }
2146 
2147 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2148   ShenandoahCodeRoots::add_nmethod(nm);
2149 }
2150 
2151 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2152   ShenandoahCodeRoots::remove_nmethod(nm);
2153 }
2154 
2155 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
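  // Pinning the containing region keeps it from being selected for evacuation while
  // external code (e.g. a JNI critical section) holds raw pointers into it.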
2156   o = BarrierSet::barrier_set()->write_barrier(o);
2157   ShenandoahHeapLocker locker(lock());
2158   heap_region_containing(o)->make_pinned();
2159   return o;
2160 }
2161 
2162 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2163   o = BarrierSet::barrier_set()->read_barrier(o);
2164   ShenandoahHeapLocker locker(lock());
2165   heap_region_containing(o)->make_unpinned();
2166 }
2167 
2168 GCTimer* ShenandoahHeap::gc_timer() const {
2169   return _gc_timer;
2170 }
2171 
2172 #ifdef ASSERT
2173 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2174   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2175 
2176   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2177     if (UseDynamicNumberOfGCThreads ||
2178         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2179       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2180     } else {
2181       // Use ParallelGCThreads inside safepoints
2182       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2183     }
2184   } else {
2185     if (UseDynamicNumberOfGCThreads ||
2186         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2187       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2188     } else {
2189       // Use ConcGCThreads outside safepoints
2190       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2191     }
2192   }
2193 }
2194 #endif
2195 
2196 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
2197   return _connection_matrix;
2198 }
2199 
2200 ShenandoahPartialGC* ShenandoahHeap::partial_gc() {
2201   return _partial_gc;
2202 }
2203 
2204 ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() {
2205   return _traversal_gc;
2206 }
2207 
2208 ShenandoahVerifier* ShenandoahHeap::verifier() {
2209   guarantee(ShenandoahVerify, "Should be enabled");
2210   assert (_verifier != NULL, "sanity");
2211   return _verifier;
2212 }
2213 
2214 template<class T>
2215 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2216 private:
2217   T cl;
2218   ShenandoahHeap* _heap;
2219   ShenandoahRegionIterator _regions;
2220   bool _concurrent;
2221 public:
2222   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator regions, bool concurrent) :
2223     AbstractGangTask("Concurrent Update References Task"),
2224     cl(T()),
2225     _heap(ShenandoahHeap::heap()),
2226     _regions(regions),
2227     _concurrent(concurrent) {
2228   }
2229 
2230   void work(uint worker_id) {
2231     SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
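    // For regions in the collection set, scrub the complete mark bitmap up to the
    // top-at-mark-start; for other active regions, walk the marked objects and let the
    // closure update their reference fields.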
2232     ShenandoahHeapRegion* r = _regions.next();
2233     while (r != NULL) {
2234       if (_heap->in_collection_set(r)) {
2235         HeapWord* bottom = r->bottom();
2236         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2237         if (top > bottom) {
2238           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2239         }
2240       } else {
2241         if (r->is_active()) {
2242           _heap->marked_object_oop_safe_iterate(r, &cl);
2243           if (ShenandoahPacing) {
2244             _heap->pacer()->report_updaterefs(r->get_live_data_words());
2245           }
2246         }
2247       }
2248       if (_heap->check_cancelled_concgc_and_yield(_concurrent)) {
2249         return;
2250       }
2251       r = _regions.next();
2252     }
2253   }
2254 };
2255 
2256 void ShenandoahHeap::update_heap_references(bool concurrent) {
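  // Pick the closure type once for the whole task: with UseShenandoahMatrix, the matrix-aware
  // variant additionally maintains the connection matrix as it updates references.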
2257   if (UseShenandoahMatrix) {
2258     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(_update_refs_iterator, concurrent);
2259     workers()->run_task(&task);
2260   } else {
2261     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(_update_refs_iterator, concurrent);
2262     workers()->run_task(&task);
2263   }
2264 }
2265 
2266 void ShenandoahHeap::op_init_updaterefs() {
2267   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2268 
2269   if (ShenandoahVerify) {
2270     verifier()->verify_before_updaterefs();
2271   }
2272 
2273   set_evacuation_in_progress(false);
2274   set_update_refs_in_progress(true);
2275   make_tlabs_parsable(true);
2276   if (UseShenandoahMatrix) {
2277     connection_matrix()->clear_all();
2278   }
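  // Record the current top of each region as the limit up to which concurrent iteration is safe.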
2279   for (uint i = 0; i < num_regions(); i++) {
2280     ShenandoahHeapRegion* r = get_region(i);
2281     r->set_concurrent_iteration_safe_limit(r->top());
2282   }
2283 
2284   // Reset iterator.
2285   _update_refs_iterator = region_iterator();
2286 
2287   if (ShenandoahPacing) {
2288     pacer()->setup_for_updaterefs();
2289   }
2290 }
2291 
2292 void ShenandoahHeap::op_final_updaterefs() {
2293   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2294 
2295   // Check if there is left-over work, and finish it
2296   if (_update_refs_iterator.has_next()) {
2297     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2298 
2299     // Finish updating references where we left off.
2300     clear_cancelled_concgc();
2301     update_heap_references(false);
2302   }
2303 
2304   // Clear the cancelled-GC flag, if set. On the cancellation path, the block above has already
2305   // handled everything; on degenerated paths, the flag would not be set anyway.
2306   if (cancelled_concgc()) {
2307     clear_cancelled_concgc();
2308   }
2309   assert(!cancelled_concgc(), "Should have been done right before");
2310 
2311   concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2312 
2313   // Allocations might have happened before we STWed here, record peak:
2314   shenandoahPolicy()->record_peak_occupancy();
2315 
2316   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2317 
2318   trash_cset_regions();
2319   set_has_forwarded_objects(false);
2320 
2321   if (ShenandoahVerify) {
2322     verifier()->verify_after_updaterefs();
2323   }
2324 
2325   {
2326     ShenandoahHeapLocker locker(lock());
2327     _free_set->rebuild();
2328   }
2329 
2330   set_update_refs_in_progress(false);
2331 }
2332 
2333 void ShenandoahHeap::set_alloc_seq_gc_start() {
2334   // Take next number, the start seq number is inclusive
2335   _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::seqnum_current_alloc() + 1;
2336 }
2337 
2338 void ShenandoahHeap::set_alloc_seq_gc_end() {
2339   // Take current number, the end seq number is also inclusive
2340   _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::seqnum_current_alloc();
2341 }
2342 
2343 
2344 #ifdef ASSERT
2345 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2346   _lock.assert_owned_by_current_thread();
2347 }
2348 
2349 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2350   _lock.assert_not_owned_by_current_thread();
2351 }
2352 
2353 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2354   _lock.assert_owned_by_current_thread_or_safepoint();
2355 }
2356 #endif
2357 
2358 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2359   print_on(st);
2360   print_heap_regions_on(st);
2361 }
2362 
2363 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
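  // Bitmap memory is committed in slices, each covering _bitmap_regions_per_slice regions.
  // A slice counts as committed if any region in its group is committed; skip_self lets
  // callers ask whether the slice would still be needed without r itself.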
2364   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2365 
2366   size_t regions_from = _bitmap_regions_per_slice * slice;
2367   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2368   for (size_t g = regions_from; g < regions_to; g++) {
2369     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2370     if (skip_self && g == r->region_number()) continue;
2371     if (get_region(g)->is_committed()) {
2372       return true;
2373     }
2374   }
2375   return false;
2376 }
2377 
2378 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2379   assert_heaplock_owned_by_current_thread();
2380 
2381   if (is_bitmap_slice_committed(r, true)) {
2382     // Some other region from the group is already committed, meaning the bitmap
2383     // slice is already committed; exit right away.
2384     return true;
2385   }
2386 
2387   // Commit the bitmap slice:
2388   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2389   size_t off = _bitmap_bytes_per_slice * slice;
2390   size_t len = _bitmap_bytes_per_slice;
2391   if (!os::commit_memory((char*)_bitmap0_region.start() + off, len, false)) {
2392     return false;
2393   }
2394   if (!os::commit_memory((char*)_bitmap1_region.start() + off, len, false)) {
2395     return false;
2396   }
2397   return true;
2398 }
2399 
2400 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2401   assert_heaplock_owned_by_current_thread();
2402 
2403   if (is_bitmap_slice_committed(r, true)) {
2404     // Some other region from the group is still committed, meaning the bitmap
2405     // slice should stay committed; exit right away.
2406     return true;
2407   }
2408 
2409   // Uncommit the bitmap slice:
2410   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2411   size_t off = _bitmap_bytes_per_slice * slice;
2412   size_t len = _bitmap_bytes_per_slice;
2413   if (!os::uncommit_memory((char*)_bitmap0_region.start() + off, len)) {
2414     return false;
2415   }
2416   if (!os::uncommit_memory((char*)_bitmap1_region.start() + off, len)) {
2417     return false;
2418   }
2419   return true;
2420 }
2421 
2422 bool ShenandoahHeap::idle_bitmap_slice(ShenandoahHeapRegion *r) {
2423   assert_heaplock_owned_by_current_thread();
2424   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2425 
2426   if (is_bitmap_slice_committed(r, true)) {
2427     // Some other region from the group is still committed, meaning the bitmap
2428     // slice should stay committed; exit right away.
2429     return true;
2430   }
2431 
2432   // Idle the bitmap slice:
2433   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2434   size_t off = _bitmap_bytes_per_slice * slice;
2435   size_t len = _bitmap_bytes_per_slice;
2436   if (!os::idle_memory((char*)_bitmap0_region.start() + off, len)) {
2437     return false;
2438   }
2439   if (!os::idle_memory((char*)_bitmap1_region.start() + off, len)) {
2440     return false;
2441   }
2442   return true;
2443 }
2444 
2445 void ShenandoahHeap::activate_bitmap_slice(ShenandoahHeapRegion* r) {
2446   assert_heaplock_owned_by_current_thread();
2447   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2448   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2449   size_t off = _bitmap_bytes_per_slice * slice;
2450   size_t len = _bitmap_bytes_per_slice;
2451   os::activate_memory((char*)_bitmap0_region.start() + off, len);
2452   os::activate_memory((char*)_bitmap1_region.start() + off, len);
2453 }
2454 
2455 void ShenandoahHeap::safepoint_synchronize_begin() {
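  // Bring suspendible concurrent workers (and the string dedup thread, if enabled) to a
  // yield point before the safepoint operation proceeds.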
2456   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2457     SuspendibleThreadSet::synchronize();
2458   }
2459 }
2460 
2461 void ShenandoahHeap::safepoint_synchronize_end() {
2462   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2463     SuspendibleThreadSet::desynchronize();
2464   }
2465 }
2466 
2467 void ShenandoahHeap::vmop_entry_init_mark() {
2468   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2469   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2470   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2471 
2472   try_inject_alloc_failure();
2473   VM_ShenandoahInitMark op;
2474   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2475 }
2476 
2477 void ShenandoahHeap::vmop_entry_final_mark() {
2478   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2479   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2480   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2481 
2482   try_inject_alloc_failure();
2483   VM_ShenandoahFinalMarkStartEvac op;
2484   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2485 }
2486 
2487 void ShenandoahHeap::vmop_entry_final_evac() {
2488   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2489   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2490   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2491 
2492   VM_ShenandoahFinalEvac op;
2493   VMThread::execute(&op); // jump to entry_final_evac under safepoint
2494 }
2495 
2496 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2497   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2498   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2499   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2500 
2501   try_inject_alloc_failure();
2502   VM_ShenandoahInitUpdateRefs op;
2503   VMThread::execute(&op);
2504 }
2505 
2506 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2507   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2508   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2509   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2510 
2511   try_inject_alloc_failure();
2512   VM_ShenandoahFinalUpdateRefs op;
2513   VMThread::execute(&op);
2514 }
2515 
2516 void ShenandoahHeap::vmop_entry_init_partial() {
2517   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2518   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2519   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_partial_gc_gross);
2520 
2521   try_inject_alloc_failure();
2522   VM_ShenandoahInitPartialGC op;
2523   VMThread::execute(&op);
2524 }
2525 
2526 void ShenandoahHeap::vmop_entry_final_partial() {
2527   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2528   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2529   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_partial_gc_gross);
2530 
2531   try_inject_alloc_failure();
2532   VM_ShenandoahFinalPartialGC op;
2533   VMThread::execute(&op);
2534 }
2535 
2536 void ShenandoahHeap::vmop_entry_init_traversal() {
2537   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2538   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2539   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2540 
2541   try_inject_alloc_failure();
2542   VM_ShenandoahInitTraversalGC op;
2543   VMThread::execute(&op);
2544 }
2545 
2546 void ShenandoahHeap::vmop_entry_final_traversal() {
2547   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2548   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2549   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2550 
2551   try_inject_alloc_failure();
2552   VM_ShenandoahFinalTraversalGC op;
2553   VMThread::execute(&op);
2554 }
2555 
2556 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2557   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2558   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2559   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2560 
2561   try_inject_alloc_failure();
2562   VM_ShenandoahFullGC op(cause);
2563   VMThread::execute(&op);
2564 }
2565 
2566 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2567   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2568   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2569   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2570 
2571   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2572   VMThread::execute(&degenerated_gc);
2573 }
2574 
2575 void ShenandoahHeap::entry_init_mark() {
2576   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2577   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2578 
2579   FormatBuffer<> msg("Pause Init Mark%s%s%s",
2580                      has_forwarded_objects() ? " (update refs)"    : "",
2581                      process_references() ?    " (process refs)"   : "",
2582                      unload_classes() ?        " (unload classes)" : "");
2583   GCTraceTime(Info, gc) time(msg, gc_timer());
2584   EventMark em("%s", msg.buffer());
2585 
2586   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_init_marking());
2587 
2588   op_init_mark();
2589 }
2590 
2591 void ShenandoahHeap::entry_final_mark() {
2592   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2593   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2594 
2595   FormatBuffer<> msg("Pause Final Mark%s%s%s",
2596                      has_forwarded_objects() ? " (update refs)"    : "",
2597                      process_references() ?    " (process refs)"   : "",
2598                      unload_classes() ?        " (unload classes)" : "");
2599   GCTraceTime(Info, gc) time(msg, gc_timer());
2600   EventMark em("%s", msg.buffer());
2601 
2602   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_marking());
2603 
2604   op_final_mark();
2605 }
2606 
2607 void ShenandoahHeap::entry_final_evac() {
2608   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2609   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2610 
2611   FormatBuffer<> msg("Pause Final Evac");
2612   GCTraceTime(Info, gc) time(msg, gc_timer());
2613   EventMark em("%s", msg.buffer());
2614 
2615   op_final_evac();
2616 }
2617 
2618 void ShenandoahHeap::entry_init_updaterefs() {
2619   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2620   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2621 
2622   static const char* msg = "Pause Init Update Refs";
2623   GCTraceTime(Info, gc) time(msg, gc_timer());
2624   EventMark em("%s", msg);
2625 
2626   // No workers used in this phase, no setup required
2627 
2628   op_init_updaterefs();
2629 }
2630 
2631 void ShenandoahHeap::entry_final_updaterefs() {
2632   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2633   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2634 
2635   static const char* msg = "Pause Final Update Refs";
2636   GCTraceTime(Info, gc) time(msg, gc_timer());
2637   EventMark em("%s", msg);
2638 
2639   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_update_ref());
2640 
2641   op_final_updaterefs();
2642 }
2643 
2644 void ShenandoahHeap::entry_init_partial() {
2645   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2646   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_partial_gc);
2647 
2648   static const char* msg = "Pause Init Partial";
2649   GCTraceTime(Info, gc) time(msg, gc_timer());
2650   EventMark em("%s", msg);
2651 
2652   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_partial());
2653 
2654   op_init_partial();
2655 }
2656 
2657 void ShenandoahHeap::entry_final_partial() {
2658   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2659   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_partial_gc);
2660 
2661   static const char* msg = "Pause Final Partial";
2662   GCTraceTime(Info, gc) time(msg, gc_timer());
2663   EventMark em("%s", msg);
2664 
2665   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_partial());
2666 
2667   op_final_partial();
2668 }
2669 
2670 void ShenandoahHeap::entry_init_traversal() {
2671   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2672   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2673 
2674   static const char* msg = "Pause Init Traversal";
2675   GCTraceTime(Info, gc) time(msg, gc_timer());
2676   EventMark em("%s", msg);
2677 
2678   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2679 
2680   op_init_traversal();
2681 }
2682 
2683 void ShenandoahHeap::entry_final_traversal() {
2684   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2685   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2686 
2687   static const char* msg = "Pause Final Traversal";
2688   GCTraceTime(Info, gc) time(msg, gc_timer());
2689   EventMark em("%s", msg);
2690 
2691   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2692 
2693   op_final_traversal();
2694 }
2695 
2696 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2697   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2698   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2699 
2700   static const char* msg = "Pause Full";
2701   GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2702   EventMark em("%s", msg);
2703 
2704   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_fullgc());
2705 
2706   op_full(cause);
2707 }
2708 
2709 void ShenandoahHeap::entry_degenerated(int point) {
2710   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2711   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2712 
2713   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2714   FormatBuffer<> msg("Pause Degenerated GC (%s)", degen_point_to_string(dpoint));
2715   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2716   EventMark em("%s", msg.buffer());
2717 
2718   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated());
2719 
2720   set_degenerated_gc_in_progress(true);
2721   op_degenerated(dpoint);
2722   set_degenerated_gc_in_progress(false);
2723 }
2724 
2725 void ShenandoahHeap::entry_mark() {
2726   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2727 
2728   FormatBuffer<> msg("Concurrent marking%s%s%s",
2729                      has_forwarded_objects() ? " (update refs)"    : "",
2730                      process_references() ?    " (process refs)"   : "",
2731                      unload_classes() ?        " (unload classes)" : "");
2732   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2733   EventMark em("%s", msg.buffer());
2734 
2735   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_marking());
2736 
2737   try_inject_alloc_failure();
2738   op_mark();
2739 }
2740 
2741 void ShenandoahHeap::entry_evac() {
2742   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2743   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2744 
2745   static const char* msg = "Concurrent evacuation";
2746   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2747   EventMark em("%s", msg);
2748 
2749   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_evac());
2750 
2751   try_inject_alloc_failure();
2752   op_evac();
2753 }
2754 
2755 void ShenandoahHeap::entry_updaterefs() {
2756   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2757 
2758   static const char* msg = "Concurrent update references";
2759   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2760   EventMark em("%s", msg);
2761 
2762   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref());
2763 
2764   try_inject_alloc_failure();
2765   op_updaterefs();
2766 }

2767 void ShenandoahHeap::entry_cleanup() {
2768   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2769 
2770   static const char* msg = "Concurrent cleanup";
2771   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2772   EventMark em("%s", msg);
2773 
2774   // This phase does not use workers, no need for setup
2775 
2776   try_inject_alloc_failure();
2777   op_cleanup();
2778 }
2779 
2780 void ShenandoahHeap::entry_cleanup_bitmaps() {
2781   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2782 
2783   static const char* msg = "Concurrent cleanup";
2784   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2785   EventMark em("%s", msg);
2786 
2787   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup());
2788 
2789   try_inject_alloc_failure();
2790   op_cleanup_bitmaps();
2791 }
2792 
2793 void ShenandoahHeap::entry_preclean() {
2794   if (ShenandoahPreclean && process_references()) {
2795     static const char* msg = "Concurrent precleaning";
2796     GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2797     EventMark em("%s", msg);
2798 
2799     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2800 
2801     ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_preclean());
2802 
2803     try_inject_alloc_failure();
2804     op_preclean();
2805   }
2806 }
2807 
2808 void ShenandoahHeap::entry_partial() {
2809   static const char* msg = "Concurrent partial";
2810   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2811   EventMark em("%s", msg);
2812 
2813   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2814 
2815   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_partial());
2816 
2817   try_inject_alloc_failure();
2818   op_partial();
2819 }
2820 
2821 void ShenandoahHeap::entry_traversal() {
2822   static const char* msg = "Concurrent traversal";
2823   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2824   EventMark em("%s", msg);
2825 
2826   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2827 
2828   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2829 
2830   try_inject_alloc_failure();
2831   op_traversal();
2832 }
2833 
2834 void ShenandoahHeap::try_inject_alloc_failure() {
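  // Diagnostic hook: with ShenandoahAllocFailureALot, roughly 5% of calls raise the injection
  // flag and sleep briefly so the induced failure has a chance to be observed.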
2835   if (ShenandoahAllocFailureALot && !cancelled_concgc() && ((os::random() % 1000) > 950)) {
2836     _inject_alloc_failure.set();
2837     os::naked_short_sleep(1);
2838     if (cancelled_concgc()) {
2839       log_info(gc)("Allocation failure was successfully injected");
2840     }
2841   }
2842 }
2843 
2844 bool ShenandoahHeap::should_inject_alloc_failure() {
2845   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2846 }
2847 
2848 void ShenandoahHeap::initialize_serviceability() {
2849   _memory_pool = new ShenandoahMemoryPool(this);
2850   _cycle_memory_manager.add_pool(_memory_pool);
2851   _stw_memory_manager.add_pool(_memory_pool);
2852 }
2853 
2854 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2855   GrowableArray<GCMemoryManager*> memory_managers(2);
2856   memory_managers.append(&_cycle_memory_manager);
2857   memory_managers.append(&_stw_memory_manager);
2858   return memory_managers;
2859 }
2860 
2861 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2862   GrowableArray<MemoryPool*> memory_pools(1);
2863   memory_pools.append(_memory_pool);
2864   return memory_pools;
2865 }
2866 
2867 void ShenandoahHeap::enter_evacuation() {
2868   _oom_evac_handler.enter_evacuation();
2869 }
2870 
2871 void ShenandoahHeap::leave_evacuation() {
2872   _oom_evac_handler.leave_evacuation();
2873 }
2874 
2875 SoftRefPolicy* ShenandoahHeap::soft_ref_policy() {
2876   return &_soft_ref_policy;
2877 }
2878 
2879 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2880   _index(0),
2881   _heap(ShenandoahHeap::heap()) {}
2882 
2883 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2884   _index(0),
2885   _heap(heap) {}
2886 
2887 bool ShenandoahRegionIterator::has_next() const {
2888   return _index < _heap->num_regions();
2889 }
2890 
2891 ShenandoahRegionIterator ShenandoahHeap::region_iterator() const {
2892   return ShenandoahRegionIterator();
2893 }
2894 
2895 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure& cl) const {
2896   ShenandoahRegionIterator regions = region_iterator();
2897   ShenandoahHeapRegion* r = regions.next();
2898   while (r != NULL) {
2899     if (cl.heap_region_do(r)) {
2900       break;
2901     }
2902     r = regions.next();
2903   }
2904 }
2905 
2906 char ShenandoahHeap::gc_state() const {
2907   return _gc_state.raw_value();
2908 }