/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "memory/allocation.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/plab.hpp"

#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahAllocTracker.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
#include "gc/shenandoah/shenandoahControlThread.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahMemoryPool.hpp"
#include "gc/shenandoah/shenandoahMetrics.hpp"
#include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPacer.hpp"
#include "gc/shenandoah/shenandoahPacer.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahStringDedup.hpp"
#include "gc/shenandoah/shenandoahTraversalGC.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahCodeRoots.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/vm_operations_shenandoah.hpp"
#include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahPartialConnectedHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahPartialGenerationalHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahPartialLRUHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
#include "gc/shenandoah/heuristics/shenandoahTraversalHeuristics.hpp"

#include "memory/metaspace.hpp"
#include "runtime/vmThread.hpp"
#include "services/mallocTracker.hpp"

ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}

#ifdef ASSERT
template <class T>
void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  T o = RawAccess<>::oop_load(p);
  if (! CompressedOops::is_null(o)) {
    oop obj = CompressedOops::decode_not_null(o);
    shenandoah_assert_not_forwarded(p, obj);
  }
}

void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
#endif

const char* ShenandoahHeap::name() const {
  return "Shenandoah";
}

class ShenandoahPretouchTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;
  const size_t _bitmap_size;
  const size_t _page_size;
  char* _bitmap0_base;
  char* _bitmap1_base;
public:
  ShenandoahPretouchTask(char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
                         size_t page_size) :
    AbstractGangTask("Shenandoah PreTouch"),
    _bitmap_size(bitmap_size),
    _page_size(page_size),
    _bitmap0_base(bitmap0_base),
    _bitmap1_base(bitmap1_base) {}

  virtual void work(uint worker_id) {
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(r->bottom()), p2i(r->end()));
      os::pretouch_memory(r->bottom(), r->end(), _page_size);

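      // Map this region onto its slice of the mark bitmaps: one bitmap byte
      // covers MarkBitMap::heap_map_factor() heap bytes, so the per-region
      // bitmap extent is region-size / factor.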
      size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
      assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
      os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);

      log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
                          r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
      os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);

      r = _regions.next();
    }
  }
};

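// Heap initialization: reserve and commit the initial heap, carve it into
// regions, set up the marking bitmaps and their commit granularity, and bring
// up dependent subsystems (pacer, matrix, traversal GC, monitoring, control thread).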
jint ShenandoahHeap::initialize() {

  BrooksPointer::initial_checks();

  initialize_heuristics();

  size_t init_byte_size = collector_policy()->initial_heap_byte_size();
  size_t max_byte_size = collector_policy()->max_heap_byte_size();
  size_t heap_alignment = collector_policy()->heap_alignment();

  if (ShenandoahAlwaysPreTouch) {
    // Enabled pre-touch means the entire heap is committed right away.
    init_byte_size = max_byte_size;
  }

  Universe::check_alignment(max_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");
  Universe::check_alignment(init_byte_size,
                            ShenandoahHeapRegion::region_size_bytes(),
                            "shenandoah heap");

  ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
                                                 heap_alignment);
  initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));

  BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
  ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);

  _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
  size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
  _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
  _committed = _initial_size;

  log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
  if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
    vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
  }

  size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
  size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();

  _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
  _free_set = new ShenandoahFreeSet(this, _num_regions);

  _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());

  _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _next_top_at_mark_starts = _next_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());

  _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
  _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
               ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
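
  // Note: the top-at-mark-start arrays above are biased by the heap base, so
  // that they can be indexed directly with (region address >> region size shift)
  // without subtracting the heap base on every TAMS lookup.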

  if (ShenandoahPacing) {
    _pacer = new ShenandoahPacer(this);
    _pacer->setup_for_idle();
  } else {
    _pacer = NULL;
  }

  {
    ShenandoahHeapLocker locker(lock());
    for (size_t i = 0; i < _num_regions; i++) {
      ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
                                                         (HeapWord*) pgc_rs.base() + reg_size_words * i,
                                                         reg_size_words,
                                                         i,
                                                         i < num_committed_regions);

      _complete_top_at_mark_starts_base[i] = r->bottom();
      _next_top_at_mark_starts_base[i] = r->bottom();
      _regions[i] = r;
      assert(!collection_set()->is_in(i), "New region should not be in collection set");
    }

    _free_set->rebuild();
  }

  assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
         "misaligned heap: " PTR_FORMAT, p2i(base()));

  // The call below uses SATB facilities that currently live in G1, but
  // arguably belong in a shared location.
  ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
                                               SATB_Q_FL_lock,
                                               20 /*G1SATBProcessCompletedThreshold */,
                                               Shared_SATB_Q_lock);

  // Reserve space for prev and next bitmap.
  size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
  _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
  _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
  _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);

  size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();

  guarantee(bitmap_bytes_per_region != 0,
            "Bitmap bytes per region should not be zero");
  guarantee(is_power_of_2(bitmap_bytes_per_region),
            "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);

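  // Choose the bitmap slice granularity: a slice is the smallest unit of the
  // bitmap that is committed and uncommitted together. It is at least one
  // bitmap page and covers a whole number of regions.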
  if (bitmap_page_size > bitmap_bytes_per_region) {
    _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
    _bitmap_bytes_per_slice = bitmap_page_size;
  } else {
    _bitmap_regions_per_slice = 1;
    _bitmap_bytes_per_slice = bitmap_bytes_per_region;
  }

  guarantee(_bitmap_regions_per_slice >= 1,
            "Should have at least one region per slice: " SIZE_FORMAT,
            _bitmap_regions_per_slice);

  guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
            "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
            _bitmap_bytes_per_slice, bitmap_page_size);

  ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
  _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);

  ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
  _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);

  size_t bitmap_init_commit = _bitmap_bytes_per_slice *
                              align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
  bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
  os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
                            "couldn't allocate initial bitmap");
  os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
                            "couldn't allocate initial bitmap");

  size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();

  if (ShenandoahVerify) {
    ReservedSpace verify_bitmap(_bitmap_size, page_size);
    os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
                              "couldn't allocate verification bitmap");
    MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
    MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
    _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
    _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
  }

  if (ShenandoahAlwaysPreTouch) {
    assert (!AlwaysPreTouch, "Should have been overridden");

    // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
    // before the initialize() calls below zero it from the initializing thread. For any given
    // region, we touch the region and the corresponding bitmaps from the same thread.
    ShenandoahWorkerScope scope(workers(), _max_workers);

    log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
                       _num_regions, page_size);
    ShenandoahPretouchTask cl(bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
    _workers->run_task(&cl);
  }

  _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
  _complete_mark_bit_map = &_mark_bit_map0;

  _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
  _next_mark_bit_map = &_mark_bit_map1;

  // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
  ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
  MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
  _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
  _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);

  if (UseShenandoahMatrix) {
    _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
  } else {
    _connection_matrix = NULL;
  }

  _traversal_gc = heuristics()->can_do_traversal_gc() ?
                new ShenandoahTraversalGC(this, _num_regions) :
                NULL;

  _monitoring_support = new ShenandoahMonitoringSupport(this);

  _phase_timings = new ShenandoahPhaseTimings();

  if (ShenandoahAllocationTrace) {
    _alloc_tracker = new ShenandoahAllocTracker();
  }

  ShenandoahStringDedup::initialize();

  _control_thread = new ShenandoahControlThread();

  ShenandoahCodeRoots::initialize();

  LogTarget(Trace, gc, region) lt;
  if (lt.is_enabled()) {
    ResourceMark rm;
    LogStream ls(lt);
    log_trace(gc, region)("All Regions");
    print_heap_regions_on(&ls);
    log_trace(gc, region)("Free Regions");
    _free_set->print_on(&ls);
  }

  log_info(gc, init)("Safepointing mechanism: %s",
                     SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
                     (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));

  return JNI_OK;
}

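// Select the heuristics implementation named by -XX:ShenandoahGCHeuristics,
// for example -XX:ShenandoahGCHeuristics=adaptive. Diagnostic and experimental
// heuristics additionally require the matching unlock flag.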
void ShenandoahHeap::initialize_heuristics() {
  if (ShenandoahGCHeuristics != NULL) {
    if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
      _heuristics = new ShenandoahAggressiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
      _heuristics = new ShenandoahStaticHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
      _heuristics = new ShenandoahAdaptiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
      _heuristics = new ShenandoahPassiveHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
      _heuristics = new ShenandoahCompactHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "connected") == 0) {
      _heuristics = new ShenandoahPartialConnectedHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "generational") == 0) {
      _heuristics = new ShenandoahPartialGenerationalHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "LRU") == 0) {
      _heuristics = new ShenandoahPartialLRUHeuristics();
    } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
      _heuristics = new ShenandoahTraversalHeuristics();
    } else {
      vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
    }

    if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
                      _heuristics->name()));
    }
    if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
      vm_exit_during_initialization(
              err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
                      _heuristics->name()));
    }

    if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
      vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
    }
    log_info(gc, init)("Shenandoah heuristics: %s",
                       _heuristics->name());
    _heuristics->print_thresholds();
  } else {
    ShouldNotReachHere();
  }
}

#ifdef _MSC_VER
#pragma warning( push )
#pragma warning( disable:4355 ) // 'this' : used in base member initializer list
#endif

ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
  CollectedHeap(),
  _shenandoah_policy(policy),
  _soft_ref_policy(),
  _regions(NULL),
  _free_set(NULL),
  _collection_set(NULL),
  _update_refs_iterator(this),
  _bytes_allocated_since_gc_start(0),
  _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
  _ref_processor(NULL),
  _next_top_at_mark_starts(NULL),
  _next_top_at_mark_starts_base(NULL),
  _complete_top_at_mark_starts(NULL),
  _complete_top_at_mark_starts_base(NULL),
  _mark_bit_map0(),
  _mark_bit_map1(),
  _aux_bit_map(),
  _connection_matrix(NULL),
  _verifier(NULL),
  _pacer(NULL),
  _used_at_last_gc(0),
  _alloc_seq_at_last_gc_start(0),
  _alloc_seq_at_last_gc_end(0),
  _safepoint_workers(NULL),
  _gc_cycle_mode(),
#ifdef ASSERT
  _heap_expansion_count(0),
#endif
  _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
  _phase_timings(NULL),
  _alloc_tracker(NULL),
  _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
  _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
  _mutator_gclab_stats(new PLABStats("Shenandoah mutator GCLAB stats", OldPLABSize, PLABWeight)),
  _collector_gclab_stats(new PLABStats("Shenandoah collector GCLAB stats", YoungPLABSize, PLABWeight)),
  _memory_pool(NULL)
{
  log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
  log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
  log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));

  _scm = new ShenandoahConcurrentMark();
  _full_gc = new ShenandoahMarkCompact();
  _used = 0;

  _max_workers = MAX2(_max_workers, 1U);
  _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
                            /* are_GC_task_threads */true,
                            /* are_ConcurrentGC_threads */false);
  if (_workers == NULL) {
    vm_exit_during_initialization("Failed necessary allocation.");
  } else {
    _workers->initialize_workers();
  }

  if (ParallelSafepointCleanupThreads > 1) {
    _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
                                                ParallelSafepointCleanupThreads,
                                                false, false);
    _safepoint_workers->initialize_workers();
  }
}

#ifdef _MSC_VER
#pragma warning( pop )
#endif

class ShenandoahResetNextBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetNextBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {}

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        HeapWord* bottom = region->bottom();
        HeapWord* top = heap->next_top_at_mark_start(region->bottom());
        if (top > bottom) {
          heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
        }
        assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap() {
  assert_gc_workers(_workers->active_workers());

  ShenandoahResetNextBitmapTask task;
  _workers->run_task(&task);
}

class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahResetNextBitmapTraversalTask() :
    AbstractGangTask("Parallel Reset Bitmap Task for Traversal") {}

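  // For regions in the traversal set, publish the just-finished marking state:
  // copy the next bitmap into the complete bitmap, advance the complete TAMS,
  // then clear the next bitmap and reset the next TAMS for the upcoming cycle.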
  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahHeapRegionSet* traversal_set = heap->traversal_gc()->traversal_set();
    ShenandoahHeapRegion* region = _regions.next();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region)) {
        if (traversal_set->is_in(region) && !region->is_trash()) {
          ShenandoahHeapLocker locker(heap->lock());
          HeapWord* bottom = region->bottom();
          HeapWord* top = heap->next_top_at_mark_start(bottom);
          assert(top <= region->top(),
                 "TAMS must be smaller than or equal to top: TAMS: " PTR_FORMAT ", top: " PTR_FORMAT,
                 p2i(top), p2i(region->top()));
          if (top > bottom) {
            heap->complete_mark_bit_map()->copy_from(heap->next_mark_bit_map(), MemRegion(bottom, top));
            heap->set_complete_top_at_mark_start(bottom, top);
            heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
            heap->set_next_top_at_mark_start(bottom, bottom);
          }
        }
        assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()),
               "next bitmap must be clear");
      }
      region = _regions.next();
    }
  }
};

void ShenandoahHeap::reset_next_mark_bitmap_traversal() {
  assert_gc_workers(_workers->active_workers());

  ShenandoahResetNextBitmapTraversalTask task;
  _workers->run_task(&task);
}

bool ShenandoahHeap::is_next_bitmap_clear() {
  for (size_t idx = 0; idx < _num_regions; idx++) {
    ShenandoahHeapRegion* r = get_region(idx);
    if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
      return false;
    }
  }
  return true;
}

bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
  return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
}

void ShenandoahHeap::print_on(outputStream* st) const {
  st->print_cr("Shenandoah Heap");
  st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
               capacity() / K, committed() / K, used() / K);
  st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT "K regions",
               num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);

  st->print("Status: ");
  if (has_forwarded_objects())               st->print("has forwarded objects, ");
  if (is_concurrent_mark_in_progress())      st->print("marking, ");
  if (is_evacuation_in_progress())           st->print("evacuating, ");
  if (is_update_refs_in_progress())          st->print("updating refs, ");
  if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
  if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
  if (is_full_gc_in_progress())              st->print("full gc, ");
  if (is_full_gc_move_in_progress())         st->print("full gc move, ");

  if (cancelled_gc()) {
    st->print("cancelled");
  } else {
    st->print("not cancelled");
  }
  st->cr();

  st->print_cr("Reserved region:");
  st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
               p2i(reserved_region().start()),
               p2i(reserved_region().end()));

  st->cr();
  MetaspaceUtils::print_on(st);

  if (UseShenandoahMatrix) {
    st->print_cr("Matrix:");

    ShenandoahConnectionMatrix* matrix = connection_matrix();
    if (matrix != NULL) {
      st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
      st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
      st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
    } else {
      st->print_cr(" No matrix.");
    }
  }

  if (Verbose) {
    print_heap_regions_on(st);
  }
}

class ShenandoahInitGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    if (thread != NULL && (thread->is_Java_thread() || thread->is_Worker_thread() ||
                           thread->is_ConcurrentGC_thread())) {
      ShenandoahThreadLocalData::initialize_gclab(thread);
    }
  }
};

void ShenandoahHeap::post_initialize() {
  CollectedHeap::post_initialize();
  MutexLocker ml(Threads_lock);

  ShenandoahInitGCLABClosure init_gclabs;
  Threads::threads_do(&init_gclabs);
  gc_threads_do(&init_gclabs);

  // GCLABs cannot be initialized early during VM startup, because max_size
  // cannot be determined yet. Instead, let the WorkGang initialize GCLABs
  // whenever a new worker is created.
  _workers->set_initialize_gclab();

  _scm->initialize(_max_workers);
  _full_gc->initialize(_gc_timer);

  ref_processing_init();

  _heuristics->initialize();
}

size_t ShenandoahHeap::used() const {
  return OrderAccess::load_acquire(&_used);
}

size_t ShenandoahHeap::committed() const {
  OrderAccess::acquire();
  return _committed;
}

void ShenandoahHeap::increase_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed += bytes;
}

void ShenandoahHeap::decrease_committed(size_t bytes) {
  assert_heaplock_or_safepoint();
  _committed -= bytes;
}

void ShenandoahHeap::increase_used(size_t bytes) {
  Atomic::add(bytes, &_used);
}

void ShenandoahHeap::set_used(size_t bytes) {
  OrderAccess::release_store_fence(&_used, bytes);
}

void ShenandoahHeap::decrease_used(size_t bytes) {
  assert(used() >= bytes, "never decrease heap size by more than we've left");
  Atomic::sub(bytes, &_used);
}

void ShenandoahHeap::increase_allocated(size_t bytes) {
  Atomic::add(bytes, &_bytes_allocated_since_gc_start);
}

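// Record a successful allocation. Wasted space counts toward the allocation
// rate and is forcefully claimed from the pacer budget, but it does not
// increase used().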
void ShenandoahHeap::notify_alloc(size_t words, bool waste) {
  size_t bytes = words * HeapWordSize;
  if (!waste) {
    increase_used(bytes);
  }
  increase_allocated(bytes);
  if (ShenandoahPacing) {
    control_thread()->pacing_notify_alloc(words);
    if (waste) {
      pacer()->claim_for_alloc(words, true);
    }
  }
}

size_t ShenandoahHeap::capacity() const {
  return num_regions() * ShenandoahHeapRegion::region_size_bytes();
}

bool ShenandoahHeap::is_maximal_no_gc() const {
  Unimplemented();
  return true;
}

size_t ShenandoahHeap::max_capacity() const {
  return _num_regions * ShenandoahHeapRegion::region_size_bytes();
}

size_t ShenandoahHeap::initial_capacity() const {
  return _initial_size;
}

bool ShenandoahHeap::is_in(const void* p) const {
  HeapWord* heap_base = (HeapWord*) base();
  HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
  return p >= heap_base && p < last_region_end;
}

bool ShenandoahHeap::is_scavengable(oop p) {
  return true;
}

void ShenandoahHeap::handle_heap_shrinkage(double shrink_before) {
  if (!ShenandoahUncommit) {
    return;
  }

  ShenandoahHeapLocker locker(lock());

  size_t count = 0;
  for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* r = get_region(i);
    if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
      r->make_uncommitted();
      count++;
    }
  }

  if (count > 0) {
    log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
                 count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
    control_thread()->notify_heap_changed();
  }
}

HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
  // The object does not fit into the current GCLAB. Discard the GCLAB and
  // allocate a new one.
  PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);

  // To minimize fragmentation, the last GCLAB may be smaller than the rest.
  gclab->retire();
  // Figure out size of new GCLAB
  size_t new_gclab_size;
  if (thread->is_Java_thread()) {
    new_gclab_size = _mutator_gclab_stats->desired_plab_sz(Threads::number_of_threads());
  } else {
    new_gclab_size = _collector_gclab_stats->desired_plab_sz(workers()->active_workers());
  }

  // Allocate a new GCLAB...
  HeapWord* gclab_buf = allocate_new_gclab(new_gclab_size);
  if (gclab_buf == NULL) {
    return NULL;
  }

  if (ZeroTLAB) {
    // ..and clear it.
    Copy::zero_to_words(gclab_buf, new_gclab_size);
  } else {
    // ...and zap just allocated object.
#ifdef ASSERT
    // Skip mangling the space corresponding to the object header to
    // ensure that the returned space is not considered parsable by
    // any concurrent GC thread.
    size_t hdr_size = oopDesc::header_size();
    Copy::fill_to_words(gclab_buf + hdr_size, new_gclab_size - hdr_size, badHeapWordVal);
#endif // ASSERT
  }
  gclab->set_buf(gclab_buf, new_gclab_size);
  return gclab->allocate(size);
}

HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
                                            size_t requested_size,
                                            size_t* actual_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", requested_size * HeapWordSize);
#endif
  *actual_size = requested_size;
  return allocate_new_lab(requested_size, _alloc_tlab);
}

HeapWord* ShenandoahHeap::allocate_new_gclab(size_t word_size) {
#ifdef ASSERT
  log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
#endif
  return allocate_new_lab(word_size, _alloc_gclab);
}

HeapWord* ShenandoahHeap::allocate_new_lab(size_t word_size, AllocType type) {
  HeapWord* result = allocate_memory(word_size, type);

  if (result != NULL) {
    assert(! in_collection_set(result), "Never allocate in collection set");
    log_develop_trace(gc, tlab)("allocating new tlab of size " SIZE_FORMAT " at addr " PTR_FORMAT, word_size, p2i(result));
  }
  return result;
}

ShenandoahHeap* ShenandoahHeap::heap() {
  CollectedHeap* heap = Universe::heap();
  assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
  assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
  return (ShenandoahHeap*) heap;
}

ShenandoahHeap* ShenandoahHeap::heap_no_check() {
  CollectedHeap* heap = Universe::heap();
  return (ShenandoahHeap*) heap;
}

HeapWord* ShenandoahHeap::allocate_memory(size_t word_size, AllocType type) {
  ShenandoahAllocTrace trace_alloc(word_size, type);

  bool in_new_region = false;
  HeapWord* result = NULL;

  if (type == _alloc_tlab || type == _alloc_shared) {
    if (ShenandoahPacing) {
      pacer()->pace_for_alloc(word_size);
    }

    if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
      result = allocate_memory_under_lock(word_size, type, in_new_region);
    }

    // Allocation failed. Block until the control thread reacted, then retry.
    //
    // It might happen that one of the threads requesting allocation would unblock
    // way later after GC happened, only to fail the second allocation, because
    // other threads have already depleted the free storage. In this case, a better
    // strategy is to try again, as long as GC makes progress.
    //
    // We also need to make sure the allocation was retried after at least one
    // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.

    size_t tries = 0;

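    // First, keep retrying as long as each GC cycle makes progress.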
    while (result == NULL && last_gc_made_progress()) {
      tries++;
      control_thread()->handle_alloc_failure(word_size);
      result = allocate_memory_under_lock(word_size, type, in_new_region);
    }

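    // Then, bound further retries by ShenandoahFullGCThreshold attempts, which
    // ensures at least one Full GC had been tried before failing the allocation.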
    while (result == NULL && tries <= ShenandoahFullGCThreshold) {
      tries++;
      control_thread()->handle_alloc_failure(word_size);
      result = allocate_memory_under_lock(word_size, type, in_new_region);
    }

  } else {
    assert(type == _alloc_gclab || type == _alloc_shared_gc, "Can only accept these types here");
    result = allocate_memory_under_lock(word_size, type, in_new_region);
    // Do not call handle_alloc_failure() here, because we cannot block.
    // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
  }

  if (in_new_region) {
    control_thread()->notify_heap_changed();
  }

  if (result != NULL) {
    log_develop_trace(gc, alloc)("allocate memory chunk of size " SIZE_FORMAT " at addr " PTR_FORMAT " by thread %d ",
                                 word_size, p2i(result), Thread::current()->osthread()->thread_id());
    notify_alloc(word_size, false);
  }

  return result;
}

HeapWord* ShenandoahHeap::allocate_memory_under_lock(size_t word_size, AllocType type, bool& in_new_region) {
  ShenandoahHeapLocker locker(lock());
  return _free_set->allocate(word_size, type, in_new_region);
}

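// Every Shenandoah object is preceded by a Brooks forwarding pointer word.
// Filler objects need one too, so carve out the fwdptr word before installing
// the filler; raw allocations below similarly reserve and initialize it.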
void ShenandoahHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
  HeapWord* obj = start + BrooksPointer::word_size();
  BrooksPointer::initialize(oop(obj));
  CollectedHeap::fill_with_object(obj, end, zap);
}

HeapWord* ShenandoahHeap::obj_allocate_raw(Klass* klass, size_t size,
                                           bool* gc_overhead_limit_was_exceeded, TRAPS) {
  size += BrooksPointer::word_size();
  HeapWord* result = CollectedHeap::obj_allocate_raw(klass, size, gc_overhead_limit_was_exceeded, THREAD);
  if (result != NULL) {
    result += BrooksPointer::word_size();
    BrooksPointer::initialize(oop(result));
    assert(! in_collection_set(result), "never allocate in targeted region");
  }
  return result;
}

HeapWord* ShenandoahHeap::mem_allocate(size_t size,
                                       bool* gc_overhead_limit_was_exceeded) {
  return allocate_memory(size, _alloc_shared);
}

class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateUpdateRootsClosure() :
    _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");

    T o = RawAccess<>::oop_load(p);
    if (! CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->in_collection_set(obj)) {
        shenandoah_assert_marked_complete(p, obj);
        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          resolved = _heap->evacuate_object(obj, _thread);
        }
        RawAccess<OOP_NOT_NULL>::oop_store(p, resolved);
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

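// Same as above, but only evacuates, without updating the reference itself:
// used where stores back into the roots cannot be done atomically, e.g. for
// oops embedded in live nmethods.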
class ShenandoahEvacuateRootsClosure: public ExtendedOopClosure {
private:
  ShenandoahHeap* _heap;
  Thread* _thread;
public:
  ShenandoahEvacuateRootsClosure() :
          _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
  }

private:
  template <class T>
  void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (! CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      if (_heap->in_collection_set(obj)) {
        oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
        if (oopDesc::unsafe_equals(resolved, obj)) {
          _heap->evacuate_object(obj, _thread);
        }
      }
    }
  }

public:
  void do_oop(oop* p) {
    do_oop_work(p);
  }
  void do_oop(narrowOop* p) {
    do_oop_work(p);
  }
};

class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  Thread* const _thread;
public:
  ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
    _heap(heap), _thread(Thread::current()) {}

  void do_object(oop p) {
    shenandoah_assert_marked_complete(NULL, p);
    if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
      _heap->evacuate_object(p, _thread);
    }
  }
};

class ShenandoahParallelEvacuationTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _sh;
  ShenandoahCollectionSet* const _cs;
  ShenandoahSharedFlag _claimed_codecache;

public:
  ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
                                   ShenandoahCollectionSet* cs) :
    AbstractGangTask("Parallel Evacuation Task"),
    _sh(sh),
    _cs(cs)
  {}

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);

    // If concurrent code cache evac is enabled, evacuate it here.
    // Note we cannot update the roots here, because we risk non-atomic stores to the alive
    // nmethods. The update would be handled elsewhere.
    if (ShenandoahConcurrentEvacCodeRoots && _claimed_codecache.try_set()) {
      ShenandoahEvacuateRootsClosure cl;
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
      CodeCache::blobs_do(&blobs);
    }

    ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
    ShenandoahHeapRegion* r;
    while ((r = _cs->claim_next()) != NULL) {
      log_develop_trace(gc, region)("Thread " INT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
                                    worker_id,
                                    r->region_number());

      assert(r->has_live(), "all-garbage regions are reclaimed early");
      _sh->marked_object_iterate(r, &cl);

      if (_sh->check_cancelled_gc_and_yield()) {
        log_develop_trace(gc, region)("Cancelled GC while evacuating region " SIZE_FORMAT, r->region_number());
        break;
      }

      if (ShenandoahPacing) {
        _sh->pacer()->report_evac(r->get_live_data_words());
      }
    }
  }
};

void ShenandoahHeap::trash_cset_regions() {
  ShenandoahHeapLocker locker(lock());

  ShenandoahCollectionSet* set = collection_set();
  ShenandoahHeapRegion* r;
  set->clear_current_index();
  while ((r = set->next()) != NULL) {
    r->make_trash();
  }
  collection_set()->clear();
}

void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
  st->print_cr("Heap Regions:");
  st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
  st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
  st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
  st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");

  for (size_t i = 0; i < num_regions(); i++) {
    get_region(i)->print_on(st);
  }
}

void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
  assert(start->is_humongous_start(), "reclaim regions starting with the first one");

  oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
  size_t size = humongous_obj->size() + BrooksPointer::word_size();
  size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
  size_t index = start->region_number() + required_regions - 1;

  assert(!start->has_live(), "liveness must be zero");
  log_trace(gc, humongous)("Reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);

  for (size_t i = 0; i < required_regions; i++) {
    // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
    // as it expects that every region belongs to a humongous region starting with a humongous start region.
    ShenandoahHeapRegion* region = get_region(index--);

    LogTarget(Trace, gc, humongous) lt;
    if (lt.is_enabled()) {
      ResourceMark rm;
      LogStream ls(lt);
      region->print_on(&ls);
    }

    assert(region->is_humongous(), "expect correct humongous start or continuation");
    assert(!in_collection_set(region), "Humongous region should not be in collection set");

    region->make_trash();
  }
}

#ifdef ASSERT
class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
  bool heap_region_do(ShenandoahHeapRegion* r) {
    assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
    return false;
  }
};
#endif

void ShenandoahHeap::prepare_for_concurrent_evacuation() {
  log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());

  if (!cancelled_gc()) {
    // Allocations might have happened before we STWed here, record peak:
    heuristics()->record_peak_occupancy();

    make_tlabs_parsable(true);

    if (ShenandoahVerify) {
      verifier()->verify_after_concmark();
    }

    trash_cset_regions();

    // NOTE: This needs to be done during a stop the world pause, because
    // putting regions into the collection set concurrently with Java threads
    // will create a race. In particular, acmp could fail because when we
    // resolve the first operand, the containing region might not yet be in
    // the collection set, and thus return the original oop. When the 2nd
    // operand gets resolved, the region could be in the collection set
    // and the oop gets evacuated. If both operands have originally been
    // the same, we get false negatives.

    {
      ShenandoahHeapLocker locker(lock());
      _collection_set->clear();
      _free_set->clear();

#ifdef ASSERT
      ShenandoahCheckCollectionSetClosure ccsc;
      heap_region_iterate(&ccsc);
#endif

      heuristics()->choose_collection_set(_collection_set);

      _free_set->rebuild();
    }

    Universe::update_heap_info_at_gc();

    if (ShenandoahVerify) {
      verifier()->verify_before_evacuation();
    }
  }
}

class ShenandoahRetireTLABClosure : public ThreadClosure {
private:
  bool _retire;

public:
  ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}

  void do_thread(Thread* thread) {
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
    gclab->retire();
  }
};

void ShenandoahHeap::make_tlabs_parsable(bool retire_tlabs) {
  if (UseTLAB) {
    CollectedHeap::ensure_parsability(retire_tlabs);
  }
  ShenandoahRetireTLABClosure cl(retire_tlabs);
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  gc_threads_do(&cl);
}

class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah evacuate and update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahEvacuateUpdateRootsClosure cl;

    if (ShenandoahConcurrentEvacCodeRoots) {
      _rp->process_evacuate_roots(&cl, NULL, worker_id);
    } else {
      MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
      _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
    }
  }
};

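// Used by fixup_roots() below: walks the root set again and updates any
// remaining from-space references after a cancelled evacuation.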
class ShenandoahFixRootsTask : public AbstractGangTask {
  ShenandoahRootEvacuator* _rp;
public:

  ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
    AbstractGangTask("Shenandoah update roots"),
    _rp(rp)
  {
    // Nothing else to do.
  }

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahUpdateRefsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);

    _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
  }
};

void ShenandoahHeap::evacuate_and_update_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");

  {
    ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
    ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
    workers()->run_task(&roots_task);
  }

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
  if (cancelled_gc()) {
    fixup_roots();
  }
}

void ShenandoahHeap::fixup_roots() {
  assert(cancelled_gc(), "Only after concurrent cycle failed");

  // If initial evacuation has been cancelled, we need to update all references
  // after all workers have finished. Otherwise we might run into the following problem:
  // GC thread 1 cannot allocate anymore, thus evacuation fails and leaves a from-space
  // pointer to object X in some root oop*. GC thread 2 then evacuates the same object X
  // to to-space, which leaves that root with a truly dangling from-space reference.
  // This must not happen.
  // clear() and update_pointers() must always be called in pairs, and cannot
  // nest with the clear()/update_pointers() in evacuate_and_update_roots() above.
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
  ShenandoahFixRootsTask update_roots_task(&rp);
  workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahHeap::roots_iterate(OopClosure* cl) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");

  CodeBlobToOopClosure blobsCl(cl, false);
  CLDToOopClosure cldCl(cl);

  ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
  rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
}

bool ShenandoahHeap::supports_tlab_allocation() const {
  return true;
}

size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
  return MIN2(_free_set->unsafe_peek_free(), max_tlab_size());
}

size_t ShenandoahHeap::max_tlab_size() const {
  return ShenandoahHeapRegion::max_tlab_size_bytes();
}

class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
public:
  void do_thread(Thread* thread) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
    if (thread->is_Java_thread()) {
      gclab->flush_and_retire_stats(heap->mutator_gclab_stats());
    } else {
      gclab->flush_and_retire_stats(heap->collector_gclab_stats());
    }
  }
};

void ShenandoahHeap::accumulate_statistics_all_gclabs() {
  ShenandoahAccumulateStatisticsGCLABClosure cl;
  for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
    cl.do_thread(t);
  }
  gc_threads_do(&cl);
  _mutator_gclab_stats->adjust_desired_plab_sz();
  _collector_gclab_stats->adjust_desired_plab_sz();
}

bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
  return true;
}

oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
  // Overridden to do nothing.
  return new_obj;
}

bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
  return true;
}

bool ShenandoahHeap::card_mark_must_follow_store() const {
  return false;
}

void ShenandoahHeap::collect(GCCause::Cause cause) {
  control_thread()->handle_explicit_gc(cause);
}

void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
  //assert(false, "Shouldn't need to do full collections");
}

AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}

CollectorPolicy* ShenandoahHeap::collector_policy() const {
  return _shenandoah_policy;
}

HeapWord* ShenandoahHeap::block_start(const void* addr) const {
  Space* sp = heap_region_containing(addr);
  if (sp != NULL) {
    return sp->block_start(addr);
  }
  return NULL;
}

size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  assert(sp != NULL, "block_size of address outside of heap");
  return sp->block_size(addr);
}

bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
  Space* sp = heap_region_containing(addr);
  return sp->block_is_obj(addr);
}

jlong ShenandoahHeap::millis_since_last_gc() {
  return 0;
}

void ShenandoahHeap::prepare_for_verify() {
  if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
    make_tlabs_parsable(false);
  }
}
1375 
1376 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1377   workers()->print_worker_threads_on(st);
1378   if (ShenandoahStringDedup::is_enabled()) {
1379     ShenandoahStringDedup::print_worker_threads_on(st);
1380   }
1381 }
1382 
1383 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1384   workers()->threads_do(tcl);
1385   if (ShenandoahStringDedup::is_enabled()) {
1386     ShenandoahStringDedup::threads_do(tcl);
1387   }
1388 }
1389 
1390 void ShenandoahHeap::print_tracing_info() const {
1391   LogTarget(Info, gc, stats) lt;
1392   if (lt.is_enabled()) {
1393     ResourceMark rm;
1394     LogStream ls(lt);
1395 
1396     phase_timings()->print_on(&ls);
1397 
1398     ls.cr();
1399     ls.cr();
1400 
1401     shenandoahPolicy()->print_gc_stats(&ls);
1402 
1403     ls.cr();
1404     ls.cr();
1405 
1406     if (ShenandoahPacing) {
1407       pacer()->print_on(&ls);
1408     }
1409 
1410     ls.cr();
1411     ls.cr();
1412 
1413     if (ShenandoahAllocationTrace) {
1414       assert(alloc_tracker() != NULL, "Must be");
1415       alloc_tracker()->print_on(&ls);
1416     } else {
1417       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1418     }
1419   }
1420 }
1421 
1422 void ShenandoahHeap::verify(VerifyOption vo) {
1423   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1424     if (ShenandoahVerify) {
1425       verifier()->verify_generic(vo);
1426     } else {
1427       // TODO: Consider allocating verification bitmaps on demand,
1428       // and turn this on unconditionally.
1429     }
1430   }
1431 }

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1433   return _free_set->capacity();
1434 }
1435 
1436 class ObjectIterateScanRootClosure : public ExtendedOopClosure {
1437 private:
1438   MarkBitMap* _bitmap;
1439   Stack<oop,mtGC>* _oop_stack;
1440 
1441   template <class T>
1442   void do_oop_work(T* p) {
1443     T o = RawAccess<>::oop_load(p);
1444     if (!CompressedOops::is_null(o)) {
1445       oop obj = CompressedOops::decode_not_null(o);
1446       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1447       assert(oopDesc::is_oop(obj), "must be a valid oop");
1448       if (!_bitmap->isMarked((HeapWord*) obj)) {
1449         _bitmap->mark((HeapWord*) obj);
1450         _oop_stack->push(obj);
1451       }
1452     }
1453   }
1454 public:
1455   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1456     _bitmap(bitmap), _oop_stack(oop_stack) {}
1457   void do_oop(oop* p)       { do_oop_work(p); }
1458   void do_oop(narrowOop* p) { do_oop_work(p); }
1459 };
1460 
/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below),
 * we don't need to make the heap parsable. For Shenandoah-internal linear heap scans
 * that we can control, we call SH::make_tlabs_parsable().
 */
1467 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1468   // No-op.
1469 }
1470 
1471 /*
1472  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1473  *
1474  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1475  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1476  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1477  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1478  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1479  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1480  * wiped the bitmap in preparation for next marking).
1481  *
1482  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1483  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1484  * is allowed to report dead objects, but is not required to do so.
1485  */
1486 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1487   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1488   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1489     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1490     return;
1491   }
1492 
1493   Stack<oop,mtGC> oop_stack;
1494 
1495   // First, we process all GC roots. This populates the work stack with initial objects.
1496   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1497   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1498   CLDToOopClosure clds(&oops, false);
1499   CodeBlobToOopClosure blobs(&oops, false);
1500   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1501 
1502   // Work through the oop stack to traverse heap.
1503   while (! oop_stack.is_empty()) {
1504     oop obj = oop_stack.pop();
1505     assert(oopDesc::is_oop(obj), "must be a valid oop");
1506     cl->do_object(obj);
1507     obj->oop_iterate(&oops);
1508   }
1509 
1510   assert(oop_stack.is_empty(), "should be empty");
1511 
1512   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1513     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1514   }
1515 }
1516 
1517 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1518   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1519   object_iterate(cl);
1520 }
1521 
1522 // Apply blk->heap_region_do() on all committed regions in address order,
1523 // terminating the iteration early if heap_region_do() returns true.
1524 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1525   for (size_t i = 0; i < num_regions(); i++) {
    ShenandoahHeapRegion* current = get_region(i);
1527     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1528       continue;
1529     }
1530     if (skip_cset_regions && in_collection_set(current)) {
1531       continue;
1532     }
1533     if (blk->heap_region_do(current)) {
1534       return;
1535     }
1536   }
1537 }
1538 
1539 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1540 private:
1541   ShenandoahHeap* sh;
1542 public:
1543   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1544 
1545   bool heap_region_do(ShenandoahHeapRegion* r) {
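    // Reset liveness and set next top-at-mark-start (TAMS) to the current top:
    // objects allocated above TAMS during marking are implicitly live.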
1546     r->clear_live_data();
1547     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1548     return false;
1549   }
1550 };
1551 
1552 void ShenandoahHeap::op_init_mark() {
1553   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1554 
1555   assert(is_next_bitmap_clear(), "need clear marking bitmap");
1556 
1557   if (ShenandoahVerify) {
1558     verifier()->verify_before_concmark();
1559   }
1560 
1561   {
1562     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1563     accumulate_statistics_all_tlabs();
1564   }
1565 
1566   set_concurrent_mark_in_progress(true);
1567   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1568   {
1569     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1570     make_tlabs_parsable(true);
1571   }
1572 
1573   {
1574     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1575     ShenandoahClearLivenessClosure clc(this);
1576     heap_region_iterate(&clc);
1577   }
1578 
1579   // Make above changes visible to worker threads
1580   OrderAccess::fence();
1581 
1582   concurrentMark()->init_mark_roots();
1583 
1584   if (UseTLAB) {
1585     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1586     resize_all_tlabs();
1587   }
1588 
1589   if (ShenandoahPacing) {
1590     pacer()->setup_for_mark();
1591   }
1592 }
1593 
1594 void ShenandoahHeap::op_mark() {
1595   concurrentMark()->mark_from_roots();
1596 
1597   // Allocations happen during concurrent mark, record peak after the phase:
1598   heuristics()->record_peak_occupancy();
1599 }
1600 
1601 void ShenandoahHeap::op_final_mark() {
1602   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1603 
  // It is critical that we evacuate roots right after finishing marking,
  // so that we don't get unmarked objects in the roots.
1607 
1608   if (!cancelled_gc()) {
1609     concurrentMark()->finish_mark_from_roots();
1610     stop_concurrent_marking();
1611 
1612     {
1613       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1614       prepare_for_concurrent_evacuation();
1615     }
1616 
1617     // If collection set has candidates, start evacuation.
1618     // Otherwise, bypass the rest of the cycle.
1619     if (!collection_set()->is_empty()) {
1620       set_evacuation_in_progress(true);
1621       // From here on, we need to update references.
1622       set_has_forwarded_objects(true);
1623 
1624       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1625       evacuate_and_update_roots();
1626     }
1627 
1628     if (ShenandoahPacing) {
1629       pacer()->setup_for_evac();
1630     }
1631   } else {
1632     concurrentMark()->cancel();
1633     stop_concurrent_marking();
1634 
1635     if (process_references()) {
1636       // Abandon reference processing right away: pre-cleaning must have failed.
1637       ReferenceProcessor *rp = ref_processor();
1638       rp->disable_discovery();
1639       rp->abandon_partial_discovery();
1640       rp->verify_no_references_recorded();
1641     }
1642   }
1643 }
1644 
1645 void ShenandoahHeap::op_final_evac() {
1646   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1647 
1648   accumulate_statistics_all_gclabs();
1649   set_evacuation_in_progress(false);
1650   if (ShenandoahVerify) {
1651     verifier()->verify_after_evacuation();
1652   }
1653 }
1654 
void ShenandoahHeap::op_evac() {
1657   LogTarget(Trace, gc, region) lt_region;
1658   LogTarget(Trace, gc, cset) lt_cset;
1659 
1660   if (lt_region.is_enabled()) {
1661     ResourceMark rm;
1662     LogStream ls(lt_region);
1663     ls.print_cr("All available regions:");
1664     print_heap_regions_on(&ls);
1665   }
1666 
1667   if (lt_cset.is_enabled()) {
1668     ResourceMark rm;
1669     LogStream ls(lt_cset);
    ls.print_cr("Collection set (" SIZE_FORMAT " regions):", _collection_set->count());
1671     _collection_set->print_on(&ls);
1672 
1673     ls.print_cr("Free set:");
1674     _free_set->print_on(&ls);
1675   }
1676 
1677   ShenandoahParallelEvacuationTask task(this, _collection_set);
1678   workers()->run_task(&task);
1679 
1680   if (lt_cset.is_enabled()) {
1681     ResourceMark rm;
1682     LogStream ls(lt_cset);
    ls.print_cr("After evacuation collection set (" SIZE_FORMAT " regions):",
                _collection_set->count());
1685     _collection_set->print_on(&ls);
1686 
1687     ls.print_cr("After evacuation free set:");
1688     _free_set->print_on(&ls);
1689   }
1690 
1691   if (lt_region.is_enabled()) {
1692     ResourceMark rm;
1693     LogStream ls(lt_region);
1694     ls.print_cr("All regions after evacuation:");
1695     print_heap_regions_on(&ls);
1696   }
1697 
1698   // Allocations happen during evacuation, record peak after the phase:
1699   heuristics()->record_peak_occupancy();
1700 }
1701 
1702 void ShenandoahHeap::op_updaterefs() {
1703   update_heap_references(true);
1704 
1705   // Allocations happen during update-refs, record peak after the phase:
1706   heuristics()->record_peak_occupancy();
1707 }
1708 
1709 void ShenandoahHeap::op_cleanup() {
1710   ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
1711   free_set()->recycle_trash();
1712 
1713   // Allocations happen during cleanup, record peak after the phase:
1714   heuristics()->record_peak_occupancy();
1715 }
1716 
1717 void ShenandoahHeap::op_cleanup_bitmaps() {
1718   op_cleanup();
1719 
1720   ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1721   reset_next_mark_bitmap();
1722 
1723   // Allocations happen during bitmap cleanup, record peak after the phase:
1724   heuristics()->record_peak_occupancy();
1725 }
1726 
void ShenandoahHeap::op_cleanup_traversal() {
  {
1730     ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1731     reset_next_mark_bitmap_traversal();
1732   }
1733 
1734   op_cleanup();
1735 
1736   // Allocations happen during bitmap cleanup, record peak after the phase:
1737   heuristics()->record_peak_occupancy();
1738 }
1739 
1740 void ShenandoahHeap::op_preclean() {
1741   concurrentMark()->preclean_weak_refs();
1742 
1743   // Allocations happen during concurrent preclean, record peak after the phase:
1744   heuristics()->record_peak_occupancy();
1745 }
1746 
1747 void ShenandoahHeap::op_init_traversal() {
1748   traversal_gc()->init_traversal_collection();
1749 }
1750 
1751 void ShenandoahHeap::op_traversal() {
1752   traversal_gc()->concurrent_traversal_collection();
1753 }
1754 
1755 void ShenandoahHeap::op_final_traversal() {
1756   traversal_gc()->final_traversal_collection();
1757 }
1758 
1759 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1760   ShenandoahMetricsSnapshot metrics;
1761   metrics.snap_before();
1762 
1763   full_gc()->do_it(cause);
1764   if (UseTLAB) {
1765     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1766     resize_all_tlabs();
1767   }
1768 
1769   metrics.snap_after();
1770   metrics.print();
1771 
1772   if (metrics.is_good_progress("Full GC")) {
1773     _progress_last_gc.set();
1774   } else {
    // Tell the allocation path that we have failed to make
    // progress, and it can finally fail.
1777     _progress_last_gc.unset();
1778   }
1779 }
1780 
1781 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
  // Degenerated GC is STW, but it can also fail. The current mechanism communicates
  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to Full GC.
1785 
1786   clear_cancelled_gc();
1787 
1788   ShenandoahMetricsSnapshot metrics;
1789   metrics.snap_before();
1790 
1791   switch (point) {
1792     case _degenerated_evac:
1793       // Not possible to degenerate from here, upgrade to Full GC right away.
1794       cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1795       op_degenerated_fail();
1796       return;
1797 
    // The cases below form a Duff's-device-like structure: they describe the actual
    // GC cycle, but enter it at different points, depending on which concurrent
    // phase had degenerated.
1801 
1802     case _degenerated_traversal:
1803       {
1804         ShenandoahHeapLocker locker(lock());
1805         collection_set()->clear_current_index();
1806         for (size_t i = 0; i < collection_set()->count(); i++) {
1807           ShenandoahHeapRegion* r = collection_set()->next();
1808           r->make_regular_bypass();
1809         }
1810         collection_set()->clear();
1811       }
1812       op_final_traversal();
1813       op_cleanup_traversal();
1814       return;
1815 
1816     case _degenerated_outside_cycle:
1817       if (heuristics()->can_do_traversal_gc()) {
1818         // Not possible to degenerate from here, upgrade to Full GC right away.
1819         cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1820         op_degenerated_fail();
1821         return;
1822       }
1823       op_init_mark();
1824       if (cancelled_gc()) {
1825         op_degenerated_fail();
1826         return;
1827       }
1828 
1829     case _degenerated_mark:
1830       op_final_mark();
1831       if (cancelled_gc()) {
1832         op_degenerated_fail();
1833         return;
1834       }
1835 
1836       op_cleanup();
1837 
      // If the heuristics decided to do the cycle, this flag is set, and we can
      // do evacuation. Otherwise, this is the shortcut cycle.
1840       if (is_evacuation_in_progress()) {
1841         op_evac();
1842         if (cancelled_gc()) {
1843           op_degenerated_fail();
1844           return;
1845         }
1846       }
1847 
      // If the heuristics decided to do the cycle, this flag is set, and we need
      // to do update-refs. Otherwise, this is the shortcut cycle.
1850       if (has_forwarded_objects()) {
1851         op_init_updaterefs();
1852         if (cancelled_gc()) {
1853           op_degenerated_fail();
1854           return;
1855         }
1856       }
1857 
1858     case _degenerated_updaterefs:
1859       if (has_forwarded_objects()) {
1860         op_final_updaterefs();
1861         if (cancelled_gc()) {
1862           op_degenerated_fail();
1863           return;
1864         }
1865       }
1866 
1867       op_cleanup_bitmaps();
1868       break;
1869 
1870     default:
1871       ShouldNotReachHere();
1872   }
1873 
1874   if (ShenandoahVerify) {
1875     verifier()->verify_after_degenerated();
1876   }
1877 
1878   metrics.snap_after();
1879   metrics.print();
1880 
1881   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1882   // because that probably means the heap is overloaded and/or fragmented.
1883   if (!metrics.is_good_progress("Degenerated GC")) {
1884     _progress_last_gc.unset();
1885     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1886     op_degenerated_futile();
1887   } else {
1888     _progress_last_gc.set();
1889   }
1890 }
1891 
1892 void ShenandoahHeap::op_degenerated_fail() {
1893   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1894   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1895   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1896 }
1897 
1898 void ShenandoahHeap::op_degenerated_futile() {
1899   shenandoahPolicy()->record_degenerated_upgrade_to_full();
1900   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1901 }
1902 
1903 void ShenandoahHeap::swap_mark_bitmaps() {
  // Swap bitmaps: the "next" bitmap we just marked into becomes the "complete" one.
1905   MarkBitMap* tmp1 = _complete_mark_bit_map;
1906   _complete_mark_bit_map = _next_mark_bit_map;
1907   _next_mark_bit_map = tmp1;
1908 
1909   // Swap top-at-mark-start pointers
1910   HeapWord** tmp2 = _complete_top_at_mark_starts;
1911   _complete_top_at_mark_starts = _next_top_at_mark_starts;
1912   _next_top_at_mark_starts = tmp2;
1913 
1914   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
1915   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
1916   _next_top_at_mark_starts_base = tmp3;
}

void ShenandoahHeap::stop_concurrent_marking() {
1921   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
1922   if (!cancelled_gc()) {
    // Marking finished without cancellation: piggybacked reference updates, if any,
    // are complete, so drop the forwarded-objects flag and swap the mark bitmaps.
1925     set_has_forwarded_objects(false);
1926     swap_mark_bitmaps();
1927   }
1928   set_concurrent_mark_in_progress(false);
1929 
1930   LogTarget(Trace, gc, region) lt;
1931   if (lt.is_enabled()) {
1932     ResourceMark rm;
1933     LogStream ls(lt);
1934     ls.print_cr("Regions at stopping the concurrent mark:");
1935     print_heap_regions_on(&ls);
1936   }
1937 }
1938 
1939 void ShenandoahHeap::set_gc_state_all_threads(char state) {
1940   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1941     ShenandoahThreadLocalData::set_gc_state(t, state);
1942   }
1943 }
1944 
1945 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1946   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
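  // Update the global state, then propagate it to each thread's local copy,
  // so that barrier fast-paths observe a consistent snapshot.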
1947   _gc_state.set_cond(mask, value);
1948   set_gc_state_all_threads(_gc_state.raw_value());
1949 }
1950 
1951 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1952   set_gc_state_mask(MARKING, in_progress);
1953   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1954 }
1955 
1956 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
  set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1959 }
1960 
1961 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1962   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1963   set_gc_state_mask(EVACUATION, in_progress);
1964 }
1965 
1966 uint ShenandoahHeap::oop_extra_words() {
1967   return BrooksPointer::word_size();
1968 }
1969 
1970 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1971   _heap(ShenandoahHeap::heap_no_check()) {
1972 }
1973 
1974 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1975   _heap(ShenandoahHeap::heap_no_check()) {
1976 }
1977 
1978 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1979   if (CompressedOops::is_null(obj)) {
1980     return false;
1981   }
1982   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_mark_in_progress() || _heap->is_concurrent_traversal_in_progress());
1984   return _heap->is_marked_next(obj);
1985 }
1986 
1987 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1988   if (CompressedOops::is_null(obj)) {
1989     return false;
1990   }
1991   shenandoah_assert_not_forwarded(NULL, obj);
1992   return _heap->is_marked_next(obj);
1993 }
1994 
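// While forwarded objects may exist (e.g. during update-refs), liveness checks
// must resolve the forwarding pointer before consulting the mark bitmap.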
1995 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
1996   return has_forwarded_objects() ?
1997          (BoolObjectClosure*) &_forwarded_is_alive :
1998          (BoolObjectClosure*) &_is_alive;
1999 }
2000 
2001 void ShenandoahHeap::ref_processing_init() {
2004   _forwarded_is_alive.init(this);
2005   _is_alive.init(this);
2006   assert(_max_workers > 0, "Sanity");
2007 
2008   _ref_processor =
2009     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
2010                            ParallelRefProcEnabled,  // MT processing
2011                            _max_workers,            // Degree of MT processing
2012                            true,                    // MT discovery
2013                            _max_workers,            // Degree of MT discovery
2014                            false,                   // Reference discovery is not atomic
2015                            NULL);                   // No closure, should be installed before use
2016 
2017   shenandoah_assert_rp_isalive_not_installed();
}

GCTracer* ShenandoahHeap::tracer() {
2022   return shenandoahPolicy()->tracer();
2023 }
2024 
2025 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2026   return _free_set->used();
2027 }
2028 
2029 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2030   if (try_cancel_gc()) {
2031     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2032     log_info(gc)("%s", msg.buffer());
2033     Events::log(Thread::current(), "%s", msg.buffer());
2034   }
2035 }
2036 
2037 uint ShenandoahHeap::max_workers() {
2038   return _max_workers;
2039 }
2040 
2041 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even when GC is running.
2043 
2044   // Step 0. Notify policy to disable event recording.
2045   _shenandoah_policy->record_shutdown();
2046 
2047   // Step 1. Notify control thread that we are in shutdown.
2048   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2049   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2050   control_thread()->prepare_for_graceful_shutdown();
2051 
2052   // Step 2. Notify GC workers that we are cancelling GC.
2053   cancel_gc(GCCause::_shenandoah_stop_vm);
2054 
  // Step 3. Wait until the control thread exits normally.
2056   control_thread()->stop();
2057 
2058   // Step 4. Stop String Dedup thread if it is active
2059   if (ShenandoahStringDedup::is_enabled()) {
2060     ShenandoahStringDedup::stop();
2061   }
2062 }
2063 
2064 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
2065   assert(ClassUnloading || full_gc, "Class unloading should be enabled");
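  // Pick the timing phases depending on whether we run inside a Full GC pause
  // or as part of a concurrent cycle.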
2066 
2067   ShenandoahPhaseTimings::Phase phase_root =
2068           full_gc ?
2069           ShenandoahPhaseTimings::full_gc_purge :
2070           ShenandoahPhaseTimings::purge;
2071 
2072   ShenandoahPhaseTimings::Phase phase_unload =
2073           full_gc ?
2074           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2075           ShenandoahPhaseTimings::purge_class_unload;
2076 
2077   ShenandoahPhaseTimings::Phase phase_cldg =
2078           full_gc ?
2079           ShenandoahPhaseTimings::full_gc_purge_cldg :
2080           ShenandoahPhaseTimings::purge_cldg;
2081 
2082   ShenandoahPhaseTimings::Phase phase_par =
2083           full_gc ?
2084           ShenandoahPhaseTimings::full_gc_purge_par :
2085           ShenandoahPhaseTimings::purge_par;
2086 
2087   ShenandoahPhaseTimings::Phase phase_par_classes =
2088           full_gc ?
2089           ShenandoahPhaseTimings::full_gc_purge_par_classes :
2090           ShenandoahPhaseTimings::purge_par_classes;
2091 
2092   ShenandoahPhaseTimings::Phase phase_par_codecache =
2093           full_gc ?
2094           ShenandoahPhaseTimings::full_gc_purge_par_codecache :
2095           ShenandoahPhaseTimings::purge_par_codecache;
2096 
2097   ShenandoahPhaseTimings::Phase phase_par_rmt =
2098           full_gc ?
2099           ShenandoahPhaseTimings::full_gc_purge_par_rmt :
2100           ShenandoahPhaseTimings::purge_par_rmt;
2101 
2102   ShenandoahPhaseTimings::Phase phase_par_symbstring =
2103           full_gc ?
2104           ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
2105           ShenandoahPhaseTimings::purge_par_symbstring;
2106 
2107   ShenandoahPhaseTimings::Phase phase_par_sync =
2108           full_gc ?
2109           ShenandoahPhaseTimings::full_gc_purge_par_sync :
2110           ShenandoahPhaseTimings::purge_par_sync;
2111 
2112   ShenandoahGCPhase root_phase(phase_root);
2113 
2114   BoolObjectClosure* is_alive = is_alive_closure();
2115 
2116   bool purged_class;
2117 
2118   // Unload classes and purge SystemDictionary.
2119   {
2120     ShenandoahGCPhase phase(phase_unload);
    purged_class = SystemDictionary::do_unloading(gc_timer(),
                                                  full_gc /* do_cleaning */);
2123   }
2124 
2125   {
2126     ShenandoahGCPhase phase(phase_par);
2127     uint active = _workers->active_workers();
2128     ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
2129     _workers->run_task(&unlink_task);
2130 
2131     ShenandoahPhaseTimings* p = phase_timings();
2132     ParallelCleaningTimes times = unlink_task.times();
2133 
    // "times" reports total CPU time across workers, while the phase table records
    // wall time. Divide total times by the number of active workers to get average
    // time per worker, which would add up to wall time.
2136     p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
2137     p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
2138     p->record_phase_time(phase_par_rmt,        times.rmt_work_us() / active);
2139     p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
2140     p->record_phase_time(phase_par_sync,       times.sync_us() / active);
2141   }
2142 
2143   if (ShenandoahStringDedup::is_enabled()) {
2144     ShenandoahPhaseTimings::Phase phase_par_string_dedup =
2145             full_gc ?
2146             ShenandoahPhaseTimings::full_gc_purge_par_string_dedup :
2147             ShenandoahPhaseTimings::purge_par_string_dedup;
2148     ShenandoahGCPhase phase(phase_par_string_dedup);
2149     ShenandoahStringDedup::parallel_cleanup();
  }

  {
2154     ShenandoahGCPhase phase(phase_cldg);
2155     ClassLoaderDataGraph::purge();
2156   }
2157 }
2158 
2159 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2160   set_gc_state_mask(HAS_FORWARDED, cond);
2161 }
2162 
2163 bool ShenandoahHeap::last_gc_made_progress() const {
2164   return _progress_last_gc.is_set();
2165 }
2166 
2167 void ShenandoahHeap::set_process_references(bool pr) {
2168   _process_references.set_cond(pr);
2169 }
2170 
2171 void ShenandoahHeap::set_unload_classes(bool uc) {
2172   _unload_classes.set_cond(uc);
2173 }
2174 
2175 bool ShenandoahHeap::process_references() const {
2176   return _process_references.is_set();
2177 }
2178 
2179 bool ShenandoahHeap::unload_classes() const {
2180   return _unload_classes.is_set();
2181 }
2182 
// FIXME: This should live in ShenandoahHeapRegionSet.
2184 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2185   size_t region_idx = r->region_number() + 1;
2186   ShenandoahHeapRegion* next = get_region(region_idx);
2187   guarantee(next->region_number() == region_idx, "region number must match");
2188   while (next->is_humongous()) {
2189     region_idx = next->region_number() + 1;
2190     next = get_region(region_idx);
2191     guarantee(next->region_number() == region_idx, "region number must match");
2192   }
2193   return next;
2194 }
2195 
2196 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2197   return _monitoring_support;
2198 }
2199 
2200 MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
2201   return _complete_mark_bit_map;
2202 }
2203 
2204 MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
2205   return _next_mark_bit_map;
2206 }
2207 
2208 address ShenandoahHeap::in_cset_fast_test_addr() {
2209   ShenandoahHeap* heap = ShenandoahHeap::heap();
2210   assert(heap->collection_set() != NULL, "Sanity");
2211   return (address) heap->collection_set()->biased_map_address();
2212 }
2213 
2214 address ShenandoahHeap::cancelled_gc_addr() {
2215   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
2216 }
2217 
2218 address ShenandoahHeap::gc_state_addr() {
2219   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2220 }
2221 
2222 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2223   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2224 }
2225 
2226 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2227   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2228 }
2229 
2230 ShenandoahPacer* ShenandoahHeap::pacer() const {
2231   assert (_pacer != NULL, "sanity");
2232   return _pacer;
2233 }
2234 
2235 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
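  // TAMS arrays are indexed by region number, derived from the region base address.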
2236   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2237   _next_top_at_mark_starts[index] = addr;
2238 }
2239 
2240 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2241   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2242   return _next_top_at_mark_starts[index];
2243 }
2244 
2245 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2246   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2247   _complete_top_at_mark_starts[index] = addr;
2248 }
2249 
2250 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2251   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2252   return _complete_top_at_mark_starts[index];
2253 }
2254 
2255 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2256   _degenerated_gc_in_progress.set_cond(in_progress);
2257 }
2258 
2259 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2260   _full_gc_in_progress.set_cond(in_progress);
2261 }
2262 
2263 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2264   assert (is_full_gc_in_progress(), "should be");
2265   _full_gc_move_in_progress.set_cond(in_progress);
2266 }
2267 
2268 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2269   set_gc_state_mask(UPDATEREFS, in_progress);
2270 }
2271 
2272 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2273   ShenandoahCodeRoots::add_nmethod(nm);
2274 }
2275 
2276 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2277   ShenandoahCodeRoots::remove_nmethod(nm);
2278 }
2279 
2280 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2281   o = ShenandoahBarrierSet::barrier_set()->write_barrier(o);
2282   ShenandoahHeapLocker locker(lock());
2283   heap_region_containing(o)->make_pinned();
2284   return o;
2285 }
2286 
2287 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2288   o = ShenandoahBarrierSet::barrier_set()->read_barrier(o);
2289   ShenandoahHeapLocker locker(lock());
2290   heap_region_containing(o)->make_unpinned();
2291 }
2292 
2293 GCTimer* ShenandoahHeap::gc_timer() const {
2294   return _gc_timer;
2295 }
2296 
2297 #ifdef ASSERT
2298 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2299   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2300 
2301   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2302     if (UseDynamicNumberOfGCThreads ||
2303         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2304       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2305     } else {
2306       // Use ParallelGCThreads inside safepoints
      assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2308     }
2309   } else {
2310     if (UseDynamicNumberOfGCThreads ||
2311         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2312       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2313     } else {
2314       // Use ConcGCThreads outside safepoints
2315       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2316     }
2317   }
2318 }
2319 #endif
2320 
2321 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
2322   return _connection_matrix;
2323 }
2324 
2325 ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() {
2326   return _traversal_gc;
2327 }
2328 
2329 ShenandoahVerifier* ShenandoahHeap::verifier() {
2330   guarantee(ShenandoahVerify, "Should be enabled");
2331   assert (_verifier != NULL, "sanity");
2332   return _verifier;
2333 }
2334 
2335 template<class T>
2336 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2337 private:
2338   T cl;
2339   ShenandoahHeap* _heap;
2340   ShenandoahRegionIterator* _regions;
2341   bool _concurrent;
2342 public:
2343   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2344     AbstractGangTask("Concurrent Update References Task"),
2345     cl(T()),
2346     _heap(ShenandoahHeap::heap()),
2347     _regions(regions),
2348     _concurrent(concurrent) {
2349   }
2350 
2351   void work(uint worker_id) {
2352     SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
2353     ShenandoahHeapRegion* r = _regions->next();
2354     while (r != NULL) {
2355       if (_heap->in_collection_set(r)) {
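        // Collection set regions are being evacuated: their contents are about
        // to be recycled, so wipe their mark bitmap range instead of visiting objects.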
2356         HeapWord* bottom = r->bottom();
2357         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2358         if (top > bottom) {
2359           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2360         }
2361       } else {
2362         if (r->is_active()) {
2363           _heap->marked_object_oop_safe_iterate(r, &cl);
2364           if (ShenandoahPacing) {
2365             _heap->pacer()->report_updaterefs(r->get_live_data_words());
2366           }
2367         }
2368       }
2369       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2370         return;
2371       }
2372       r = _regions->next();
2373     }
2374   }
2375 };
2376 
2377 void ShenandoahHeap::update_heap_references(bool concurrent) {
2378   if (UseShenandoahMatrix) {
2379     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(&_update_refs_iterator, concurrent);
2380     workers()->run_task(&task);
2381   } else {
2382     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2383     workers()->run_task(&task);
2384   }
2385 }
2386 
2387 void ShenandoahHeap::op_init_updaterefs() {
2388   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2389 
2390   if (ShenandoahVerify) {
2391     verifier()->verify_before_updaterefs();
2392   }
2393 
2394   accumulate_statistics_all_gclabs();
2395   set_evacuation_in_progress(false);
2396   set_update_refs_in_progress(true);
2397   make_tlabs_parsable(true);
2398   if (UseShenandoahMatrix) {
2399     connection_matrix()->clear_all();
2400   }
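  // Cap the update-refs scan at the current top of each region: objects allocated
  // during update-refs are beyond the limit and cannot hold stale from-space references.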
  for (size_t i = 0; i < num_regions(); i++) {
2402     ShenandoahHeapRegion* r = get_region(i);
2403     r->set_concurrent_iteration_safe_limit(r->top());
2404   }
2405 
2406   // Reset iterator.
2407   _update_refs_iterator.reset();
2408 
2409   if (ShenandoahPacing) {
2410     pacer()->setup_for_updaterefs();
2411   }
2412 }
2413 
2414 void ShenandoahHeap::op_final_updaterefs() {
2415   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2416 
2417   // Check if there is left-over work, and finish it
2418   if (_update_refs_iterator.has_next()) {
2419     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2420 
2421     // Finish updating references where we left off.
2422     clear_cancelled_gc();
2423     update_heap_references(false);
2424   }
2425 
  // Clear cancelled GC, if set. On the cancellation path, the block above has
  // already handled everything. On degenerated paths, cancelled GC would not be
  // set anyway.
2428   if (cancelled_gc()) {
2429     clear_cancelled_gc();
2430   }
2431   assert(!cancelled_gc(), "Should have been done right before");
2432 
2433   concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2434 
2435   // Allocations might have happened before we STWed here, record peak:
2436   heuristics()->record_peak_occupancy();
2437 
2438   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2439 
2440   trash_cset_regions();
2441   set_has_forwarded_objects(false);
2442 
2443   if (ShenandoahVerify) {
2444     verifier()->verify_after_updaterefs();
2445   }
2446 
2447   {
2448     ShenandoahHeapLocker locker(lock());
2449     _free_set->rebuild();
2450   }
2451 
2452   set_update_refs_in_progress(false);
2453 }
2454 
2455 void ShenandoahHeap::set_alloc_seq_gc_start() {
2456   // Take next number, the start seq number is inclusive
2457   _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::seqnum_current_alloc() + 1;
2458 }
2459 
2460 void ShenandoahHeap::set_alloc_seq_gc_end() {
2461   // Take current number, the end seq number is also inclusive
2462   _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::seqnum_current_alloc();
}

#ifdef ASSERT
2467 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2468   _lock.assert_owned_by_current_thread();
2469 }
2470 
2471 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2472   _lock.assert_not_owned_by_current_thread();
2473 }
2474 
2475 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2476   _lock.assert_owned_by_current_thread_or_safepoint();
2477 }
2478 #endif
2479 
2480 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2481   print_on(st);
2482   print_heap_regions_on(st);
2483 }
2484 
2485 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
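  // Multiple regions share a single bitmap slice; the slice must stay committed
  // as long as any region in the group is committed.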
2486   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2487 
2488   size_t regions_from = _bitmap_regions_per_slice * slice;
2489   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2490   for (size_t g = regions_from; g < regions_to; g++) {
2491     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2492     if (skip_self && g == r->region_number()) continue;
2493     if (get_region(g)->is_committed()) {
2494       return true;
2495     }
2496   }
2497   return false;
2498 }
2499 
2500 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2501   assert_heaplock_owned_by_current_thread();
2502 
2503   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed; exit right away.
2506     return true;
2507   }
2508 
2509   // Commit the bitmap slice:
2510   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2511   size_t off = _bitmap_bytes_per_slice * slice;
2512   size_t len = _bitmap_bytes_per_slice;
2513   if (!os::commit_memory((char*)_bitmap0_region.start() + off, len, false)) {
2514     return false;
2515   }
2516   if (!os::commit_memory((char*)_bitmap1_region.start() + off, len, false)) {
2517     return false;
2518   }
2519   return true;
2520 }
2521 
2522 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2523   assert_heaplock_owned_by_current_thread();
2524 
2525   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2528     return true;
2529   }
2530 
2531   // Uncommit the bitmap slice:
2532   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2533   size_t off = _bitmap_bytes_per_slice * slice;
2534   size_t len = _bitmap_bytes_per_slice;
2535   if (!os::uncommit_memory((char*)_bitmap0_region.start() + off, len)) {
2536     return false;
2537   }
2538   if (!os::uncommit_memory((char*)_bitmap1_region.start() + off, len)) {
2539     return false;
2540   }
2541   return true;
2542 }
2543 
2544 bool ShenandoahHeap::idle_bitmap_slice(ShenandoahHeapRegion *r) {
2545   assert_heaplock_owned_by_current_thread();
2546   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2547 
2548   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2551     return true;
2552   }
2553 
2554   // Idle the bitmap slice:
2555   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2556   size_t off = _bitmap_bytes_per_slice * slice;
2557   size_t len = _bitmap_bytes_per_slice;
2558   if (!os::idle_memory((char*)_bitmap0_region.start() + off, len)) {
2559     return false;
2560   }
2561   if (!os::idle_memory((char*)_bitmap1_region.start() + off, len)) {
2562     return false;
2563   }
2564   return true;
2565 }
2566 
2567 void ShenandoahHeap::activate_bitmap_slice(ShenandoahHeapRegion* r) {
2568   assert_heaplock_owned_by_current_thread();
2569   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2570   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2571   size_t off = _bitmap_bytes_per_slice * slice;
2572   size_t len = _bitmap_bytes_per_slice;
2573   os::activate_memory((char*)_bitmap0_region.start() + off, len);
2574   os::activate_memory((char*)_bitmap1_region.start() + off, len);
2575 }
2576 
2577 void ShenandoahHeap::safepoint_synchronize_begin() {
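  // Suspendible workers (concurrent GC and string dedup threads) must be parked
  // before the safepoint is reached, and released after it ends.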
2578   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2579     SuspendibleThreadSet::synchronize();
2580   }
2581 }
2582 
2583 void ShenandoahHeap::safepoint_synchronize_end() {
2584   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2585     SuspendibleThreadSet::desynchronize();
2586   }
2587 }
2588 
2589 void ShenandoahHeap::vmop_entry_init_mark() {
2590   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2591   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2592   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2593 
2594   try_inject_alloc_failure();
2595   VM_ShenandoahInitMark op;
2596   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2597 }
2598 
2599 void ShenandoahHeap::vmop_entry_final_mark() {
2600   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2601   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2602   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2603 
2604   try_inject_alloc_failure();
2605   VM_ShenandoahFinalMarkStartEvac op;
2606   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2607 }
2608 
2609 void ShenandoahHeap::vmop_entry_final_evac() {
2610   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2611   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2612   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2613 
2614   VM_ShenandoahFinalEvac op;
2615   VMThread::execute(&op); // jump to entry_final_evac under safepoint
2616 }
2617 
2618 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2619   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2620   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2621   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2622 
2623   try_inject_alloc_failure();
2624   VM_ShenandoahInitUpdateRefs op;
2625   VMThread::execute(&op);
2626 }
2627 
2628 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2629   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2630   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2631   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2632 
2633   try_inject_alloc_failure();
2634   VM_ShenandoahFinalUpdateRefs op;
2635   VMThread::execute(&op);
2636 }
2637 
2638 void ShenandoahHeap::vmop_entry_init_traversal() {
2639   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2640   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2641   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2642 
2643   try_inject_alloc_failure();
2644   VM_ShenandoahInitTraversalGC op;
2645   VMThread::execute(&op);
2646 }
2647 
2648 void ShenandoahHeap::vmop_entry_final_traversal() {
2649   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2650   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2651   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2652 
2653   try_inject_alloc_failure();
2654   VM_ShenandoahFinalTraversalGC op;
2655   VMThread::execute(&op);
2656 }
2657 
2658 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2659   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2660   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2661   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2662 
2663   try_inject_alloc_failure();
2664   VM_ShenandoahFullGC op(cause);
2665   VMThread::execute(&op);
2666 }
2667 
2668 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2669   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2670   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2671   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2672 
2673   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2674   VMThread::execute(&degenerated_gc);
2675 }
2676 
2677 void ShenandoahHeap::entry_init_mark() {
2678   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2679   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2680 
2681   FormatBuffer<> msg("Pause Init Mark%s%s%s",
2682                      has_forwarded_objects() ? " (update refs)"    : "",
2683                      process_references() ?    " (process refs)"   : "",
2684                      unload_classes() ?        " (unload classes)" : "");
2685   GCTraceTime(Info, gc) time(msg, gc_timer());
2686   EventMark em("%s", msg.buffer());
2687 
2688   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_init_marking());
2689 
2690   op_init_mark();
2691 }
2692 
2693 void ShenandoahHeap::entry_final_mark() {
2694   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2695   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2696 
2697   FormatBuffer<> msg("Pause Final Mark%s%s%s",
2698                      has_forwarded_objects() ? " (update refs)"    : "",
2699                      process_references() ?    " (process refs)"   : "",
2700                      unload_classes() ?        " (unload classes)" : "");
2701   GCTraceTime(Info, gc) time(msg, gc_timer());
2702   EventMark em("%s", msg.buffer());
2703 
2704   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_marking());
2705 
2706   op_final_mark();
2707 }
2708 
2709 void ShenandoahHeap::entry_final_evac() {
2710   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2711   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2712 
2713   FormatBuffer<> msg("Pause Final Evac");
2714   GCTraceTime(Info, gc) time(msg, gc_timer());
2715   EventMark em("%s", msg.buffer());
2716 
2717   op_final_evac();
2718 }
2719 
2720 void ShenandoahHeap::entry_init_updaterefs() {
2721   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2722   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2723 
2724   static const char* msg = "Pause Init Update Refs";
2725   GCTraceTime(Info, gc) time(msg, gc_timer());
2726   EventMark em("%s", msg);
2727 
2728   // No workers used in this phase, no setup required
2729 
2730   op_init_updaterefs();
2731 }
2732 
2733 void ShenandoahHeap::entry_final_updaterefs() {
2734   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2735   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2736 
2737   static const char* msg = "Pause Final Update Refs";
2738   GCTraceTime(Info, gc) time(msg, gc_timer());
2739   EventMark em("%s", msg);
2740 
2741   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_final_update_ref());
2742 
2743   op_final_updaterefs();
2744 }
2745 
2746 void ShenandoahHeap::entry_init_traversal() {
2747   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2748   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2749 
2750   static const char* msg = "Pause Init Traversal";
2751   GCTraceTime(Info, gc) time(msg, gc_timer());
2752   EventMark em("%s", msg);
2753 
2754   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2755 
2756   op_init_traversal();
2757 }
2758 
2759 void ShenandoahHeap::entry_final_traversal() {
2760   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2761   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2762 
2763   static const char* msg = "Pause Final Traversal";
2764   GCTraceTime(Info, gc) time(msg, gc_timer());
2765   EventMark em("%s", msg);
2766 
2767   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_traversal());
2768 
2769   op_final_traversal();
2770 }
2771 
2772 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2773   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2774   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2775 
2776   static const char* msg = "Pause Full";
2777   GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2778   EventMark em("%s", msg);
2779 
2780   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_fullgc());
2781 
2782   op_full(cause);
2783 }
2784 
2785 void ShenandoahHeap::entry_degenerated(int point) {
2786   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2787   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2788 
2789   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2790   FormatBuffer<> msg("Pause Degenerated GC (%s)", degen_point_to_string(dpoint));
2791   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2792   EventMark em("%s", msg.buffer());
2793 
2794   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated());
2795 
2796   set_degenerated_gc_in_progress(true);
2797   op_degenerated(dpoint);
2798   set_degenerated_gc_in_progress(false);
2799 }
2800 
2801 void ShenandoahHeap::entry_mark() {
2802   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2803 
2804   FormatBuffer<> msg("Concurrent marking%s%s%s",
2805                      has_forwarded_objects() ? " (update refs)"    : "",
2806                      process_references() ?    " (process refs)"   : "",
2807                      unload_classes() ?        " (unload classes)" : "");
2808   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2809   EventMark em("%s", msg.buffer());
2810 
2811   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_marking());
2812 
2813   try_inject_alloc_failure();
2814   op_mark();
2815 }
2816 
2817 void ShenandoahHeap::entry_evac() {
2818   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2819   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2820 
2821   static const char* msg = "Concurrent evacuation";
2822   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2823   EventMark em("%s", msg);
2824 
2825   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_evac());
2826 
2827   try_inject_alloc_failure();
2828   op_evac();
2829 }
2830 
2831 void ShenandoahHeap::entry_updaterefs() {
2832   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2833 
2834   static const char* msg = "Concurrent update references";
2835   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2836   EventMark em("%s", msg);
2837 
2838   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref());
2839 
2840   try_inject_alloc_failure();
2841   op_updaterefs();
2842 }

void ShenandoahHeap::entry_cleanup() {
2844   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2845 
2846   static const char* msg = "Concurrent cleanup";
2847   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2848   EventMark em("%s", msg);
2849 
2850   // This phase does not use workers, no need for setup
2851 
2852   try_inject_alloc_failure();
2853   op_cleanup();
2854 }
2855 
2856 void ShenandoahHeap::entry_cleanup_traversal() {
2857   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2858 
2859   static const char* msg = "Concurrent cleanup";
2860   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2861   EventMark em("%s", msg);
2862 
2863   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2864 
2865   try_inject_alloc_failure();
2866   op_cleanup_traversal();
2867 }
2868 
2869 void ShenandoahHeap::entry_cleanup_bitmaps() {
2870   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2871 
2872   static const char* msg = "Concurrent cleanup";
2873   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2874   EventMark em("%s", msg);
2875 
2876   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup());
2877 
2878   try_inject_alloc_failure();
2879   op_cleanup_bitmaps();
2880 }
2881 
2882 void ShenandoahHeap::entry_preclean() {
2883   if (ShenandoahPreclean && process_references()) {
2884     static const char* msg = "Concurrent precleaning";
2885     GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2886     EventMark em("%s", msg);
2887 
2888     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2889 
2890     ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_preclean());
2891 
2892     try_inject_alloc_failure();
2893     op_preclean();
2894   }
2895 }
2896 
2897 void ShenandoahHeap::entry_traversal() {
2898   static const char* msg = "Concurrent traversal";
2899   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2900   EventMark em("%s", msg);
2901 
2902   TraceCollectorStats tcs(is_minor_gc() ? monitoring_support()->partial_collection_counters()
2903                                         : monitoring_support()->concurrent_collection_counters());
2904 
2905   ShenandoahWorkerScope scope(workers(), ShenandoahWorkerPolicy::calc_workers_for_conc_traversal());
2906 
2907   try_inject_alloc_failure();
2908   op_traversal();
2909 }
2910 
2911 void ShenandoahHeap::try_inject_alloc_failure() {
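  // With ShenandoahAllocFailureALot, randomly (~5% of calls) pretend an allocation
  // failure happened, to exercise the cancellation and Degenerated GC paths.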
2912   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2913     _inject_alloc_failure.set();
2914     os::naked_short_sleep(1);
2915     if (cancelled_gc()) {
2916       log_info(gc)("Allocation failure was successfully injected");
2917     }
2918   }
2919 }
2920 
2921 bool ShenandoahHeap::should_inject_alloc_failure() {
2922   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2923 }
2924 
2925 void ShenandoahHeap::initialize_serviceability() {
2926   _memory_pool = new ShenandoahMemoryPool(this);
2927   _cycle_memory_manager.add_pool(_memory_pool);
2928   _stw_memory_manager.add_pool(_memory_pool);
2929 }
2930 
2931 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
2932   GrowableArray<GCMemoryManager*> memory_managers(2);
2933   memory_managers.append(&_cycle_memory_manager);
2934   memory_managers.append(&_stw_memory_manager);
2935   return memory_managers;
2936 }
2937 
2938 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
2939   GrowableArray<MemoryPool*> memory_pools(1);
2940   memory_pools.append(_memory_pool);
2941   return memory_pools;
2942 }
2943 
2944 void ShenandoahHeap::enter_evacuation() {
2945   _oom_evac_handler.enter_evacuation();
2946 }
2947 
2948 void ShenandoahHeap::leave_evacuation() {
2949   _oom_evac_handler.leave_evacuation();
2950 }
2951 
2952 SoftRefPolicy* ShenandoahHeap::soft_ref_policy() {
2953   return &_soft_ref_policy;
2954 }
2955 
2956 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2957   _index(0),
2958   _heap(ShenandoahHeap::heap()) {}
2959 
2960 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2961   _index(0),
2962   _heap(heap) {}
2963 
2964 void ShenandoahRegionIterator::reset() {
2965   _index = 0;
2966 }
2967 
2968 bool ShenandoahRegionIterator::has_next() const {
2969   return _index < _heap->num_regions();
2970 }
2971 
2972 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure& cl) const {
2973   ShenandoahRegionIterator regions;
2974   ShenandoahHeapRegion* r = regions.next();
2975   while (r != NULL) {
2976     if (cl.heap_region_do(r)) {
2977       break;
2978     }
2979     r = regions.next();
2980   }
2981 }
2982 
2983 bool ShenandoahHeap::is_minor_gc() const {
2984   return _gc_cycle_mode.get() == MINOR;
2985 }
2986 
2987 bool ShenandoahHeap::is_major_gc() const {
2988   return _gc_cycle_mode.get() == MAJOR;
2989 }
2990 
2991 void ShenandoahHeap::set_cycle_mode(GCCycleMode gc_cycle_mode) {
2992   _gc_cycle_mode.set(gc_cycle_mode);
2993 }
2994 
2995 char ShenandoahHeap::gc_state() const {
2996   return _gc_state.raw_value();
2997 }
2998 
2999 void ShenandoahHeap::deduplicate_string(oop str) {
3000   assert(java_lang_String::is_instance(str), "invariant");
3001 
3002   if (ShenandoahStringDedup::is_enabled()) {
3003     ShenandoahStringDedup::deduplicate(str);
3004   }
3005 }