1 /*
   2  * Copyright (c) 2013, 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc/shared/gcTimer.hpp"
  28 #include "gc/shared/gcTraceTime.inline.hpp"
  29 #include "gc/shared/memAllocator.hpp"
  30 #include "gc/shared/parallelCleaning.hpp"
  31 #include "gc/shared/plab.hpp"
  32 
  33 #include "gc/shenandoah/brooksPointer.hpp"
  34 #include "gc/shenandoah/shenandoahAllocTracker.hpp"
  35 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  37 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  38 #include "gc/shenandoah/shenandoahConcurrentMark.hpp"
  39 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
  40 #include "gc/shenandoah/shenandoahControlThread.hpp"
  41 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  42 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  43 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  44 #include "gc/shenandoah/shenandoahHeapRegion.hpp"
  45 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  46 #include "gc/shenandoah/shenandoahMarkCompact.hpp"
  47 #include "gc/shenandoah/shenandoahMemoryPool.hpp"
  48 #include "gc/shenandoah/shenandoahMetrics.hpp"
  49 #include "gc/shenandoah/shenandoahMonitoringSupport.hpp"
  50 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  51 #include "gc/shenandoah/shenandoahPacer.hpp"
  52 #include "gc/shenandoah/shenandoahPacer.inline.hpp"
  53 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  54 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  55 #include "gc/shenandoah/shenandoahUtils.hpp"
  56 #include "gc/shenandoah/shenandoahVerifier.hpp"
  57 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  58 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  59 #include "gc/shenandoah/vm_operations_shenandoah.hpp"
  60 #include "gc/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
  61 #include "gc/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
  62 #include "gc/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
  63 #include "gc/shenandoah/heuristics/shenandoahPartialConnectedHeuristics.hpp"
  64 #include "gc/shenandoah/heuristics/shenandoahPartialGenerationalHeuristics.hpp"
  65 #include "gc/shenandoah/heuristics/shenandoahPartialLRUHeuristics.hpp"
  66 #include "gc/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
  67 #include "gc/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
  68 
  69 #include "memory/metaspace.hpp"
  70 #include "runtime/vmThread.hpp"
  71 #include "services/mallocTracker.hpp"
  72 
  73 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  74 
  75 #ifdef ASSERT
  76 template <class T>
  77 void ShenandoahAssertToSpaceClosure::do_oop_work(T* p) {
  78   T o = RawAccess<>::oop_load(p);
  79   if (! CompressedOops::is_null(o)) {
  80     oop obj = CompressedOops::decode_not_null(o);
  81     shenandoah_assert_not_forwarded(p, obj);
  82   }
  83 }
  84 
  85 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_work(p); }
  86 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_work(p); }
  87 #endif
  88 
  89 const char* ShenandoahHeap::name() const {
  90   return "Shenandoah";
  91 }
  92 
  93 class ShenandoahPretouchTask : public AbstractGangTask {
  94 private:
  95   ShenandoahRegionIterator _regions;
  96   const size_t _bitmap_size;
  97   const size_t _page_size;
  98   char* _bitmap0_base;
  99   char* _bitmap1_base;
 100 public:
 101   ShenandoahPretouchTask(char* bitmap0_base, char* bitmap1_base, size_t bitmap_size,
 102                          size_t page_size) :
 103     AbstractGangTask("Shenandoah PreTouch"),
 104     _bitmap0_base(bitmap0_base),
 105     _bitmap1_base(bitmap1_base),
 106     _bitmap_size(bitmap_size),
 107     _page_size(page_size) {}
 108 
 109   virtual void work(uint worker_id) {
 110     ShenandoahHeapRegion* r = _regions.next();
 111     while (r != NULL) {
 112       log_trace(gc, heap)("Pretouch region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 113                           r->region_number(), p2i(r->bottom()), p2i(r->end()));
 114       os::pretouch_memory(r->bottom(), r->end(), _page_size);
 115 
 116       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 117       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 118       assert (end <= _bitmap_size, "end is sane: " SIZE_FORMAT " < " SIZE_FORMAT, end, _bitmap_size);
 119 
 120       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 121                           r->region_number(), p2i(_bitmap0_base + start), p2i(_bitmap0_base + end));
 122       os::pretouch_memory(_bitmap0_base + start, _bitmap0_base + end, _page_size);
 123 
 124       log_trace(gc, heap)("Pretouch bitmap under region " SIZE_FORMAT ": " PTR_FORMAT " -> " PTR_FORMAT,
 125                           r->region_number(), p2i(_bitmap1_base + start), p2i(_bitmap1_base + end));
 126       os::pretouch_memory(_bitmap1_base + start, _bitmap1_base + end, _page_size);
 127 
 128       r = _regions.next();
 129     }
 130   }
 131 };
 132 
 133 jint ShenandoahHeap::initialize() {
 134 
 135   BrooksPointer::initial_checks();
 136 
 137   initialize_heuristics();
 138 
 139   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 140   size_t max_byte_size = collector_policy()->max_heap_byte_size();
 141   size_t heap_alignment = collector_policy()->heap_alignment();
 142 
 143   if (ShenandoahAlwaysPreTouch) {
 144     // Enabled pre-touch means the entire heap is committed right away.
 145     init_byte_size = max_byte_size;
 146   }
 147 
 148   Universe::check_alignment(max_byte_size,
 149                             ShenandoahHeapRegion::region_size_bytes(),
 150                             "shenandoah heap");
 151   Universe::check_alignment(init_byte_size,
 152                             ShenandoahHeapRegion::region_size_bytes(),
 153                             "shenandoah heap");
 154 
 155   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
 156                                                  heap_alignment);
 157   initialize_reserved_region((HeapWord*)heap_rs.base(), (HeapWord*) (heap_rs.base() + heap_rs.size()));
 158 
 159   BarrierSet::set_barrier_set(new ShenandoahBarrierSet(this));
 160   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
 161 
 162   _num_regions = max_byte_size / ShenandoahHeapRegion::region_size_bytes();
 163   size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
 164   _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
 165   _committed = _initial_size;
 166 
 167   log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
 168   if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
 169     vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
 170   }
 171 
 172   size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
 173   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 174 
 175   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 176   _free_set = new ShenandoahFreeSet(this, _num_regions);
 177 
 178   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
 179 
 180   _next_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 181   _next_top_at_mark_starts = _next_top_at_mark_starts_base -
 182                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 183 
 184   _complete_top_at_mark_starts_base = NEW_C_HEAP_ARRAY(HeapWord*, _num_regions, mtGC);
 185   _complete_top_at_mark_starts = _complete_top_at_mark_starts_base -
 186                ((uintx) pgc_rs.base() >> ShenandoahHeapRegion::region_size_bytes_shift());
 187 
 188   if (ShenandoahPacing) {
 189     _pacer = new ShenandoahPacer(this);
 190     _pacer->setup_for_idle();
 191   } else {
 192     _pacer = NULL;
 193   }
 194 
 195   {
 196     ShenandoahHeapLocker locker(lock());
 197     for (size_t i = 0; i < _num_regions; i++) {
 198       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
 199                                                          (HeapWord*) pgc_rs.base() + reg_size_words * i,
 200                                                          reg_size_words,
 201                                                          i,
 202                                                          i < num_committed_regions);
 203 
 204       _complete_top_at_mark_starts_base[i] = r->bottom();
 205       _next_top_at_mark_starts_base[i] = r->bottom();
 206       _regions[i] = r;
 207       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 208     }
 209 
 210     _free_set->rebuild();
 211   }
 212 
 213   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 214          "misaligned heap: "PTR_FORMAT, p2i(base()));
 215 
 216   // The call below uses stuff (the SATB* things) that are in G1, but probably
 217   // belong into a shared location.
 218   ShenandoahBarrierSet::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 219                                                SATB_Q_FL_lock,
 220                                                20 /*G1SATBProcessCompletedThreshold */,
 221                                                Shared_SATB_Q_lock);
 222 
 223   // Reserve space for prev and next bitmap.
 224   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 225   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 226   _bitmap_size = align_up(_bitmap_size, bitmap_page_size);
 227   _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
 228 
 229   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 230 
 231   guarantee(bitmap_bytes_per_region != 0,
 232             "Bitmap bytes per region should not be zero");
 233   guarantee(is_power_of_2(bitmap_bytes_per_region),
 234             "Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region);
 235 
 236   if (bitmap_page_size > bitmap_bytes_per_region) {
 237     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 238     _bitmap_bytes_per_slice = bitmap_page_size;
 239   } else {
 240     _bitmap_regions_per_slice = 1;
 241     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 242   }
 243 
 244   guarantee(_bitmap_regions_per_slice >= 1,
 245             "Should have at least one region per slice: " SIZE_FORMAT,
 246             _bitmap_regions_per_slice);
 247 
 248   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 249             "Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 250             _bitmap_bytes_per_slice, bitmap_page_size);
 251 
 252   ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
 253   MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
 254   _bitmap0_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
 255 
 256   ReservedSpace bitmap1(_bitmap_size, bitmap_page_size);
 257   MemTracker::record_virtual_memory_type(bitmap1.base(), mtGC);
 258   _bitmap1_region = MemRegion((HeapWord*) bitmap1.base(), bitmap1.size() / HeapWordSize);
 259 
 260   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 261                               align_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 262   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 263   os::commit_memory_or_exit((char *) (_bitmap0_region.start()), bitmap_init_commit, false,
 264                             "couldn't allocate initial bitmap");
 265   os::commit_memory_or_exit((char *) (_bitmap1_region.start()), bitmap_init_commit, false,
 266                             "couldn't allocate initial bitmap");
 267 
 268   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 269 
 270   if (ShenandoahVerify) {
 271     ReservedSpace verify_bitmap(_bitmap_size, page_size);
 272     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
 273                               "couldn't allocate verification bitmap");
 274     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 275     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 276     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 277     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 278   }
 279 
 280   if (ShenandoahAlwaysPreTouch) {
 281     assert (!AlwaysPreTouch, "Should have been overridden");
 282 
 283     // For NUMA, it is important to pre-touch the storage under bitmaps with worker threads,
 284     // before initialize() below zeroes it with initializing thread. For any given region,
 285     // we touch the region and the corresponding bitmaps from the same thread.
 286     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 287 
 288     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 289                        _num_regions, page_size);
 290     ShenandoahPretouchTask cl(bitmap0.base(), bitmap1.base(), _bitmap_size, page_size);
 291     _workers->run_task(&cl);
 292   }
 293 
 294   _mark_bit_map0.initialize(_heap_region, _bitmap0_region);
 295   _complete_mark_bit_map = &_mark_bit_map0;
 296 
 297   _mark_bit_map1.initialize(_heap_region, _bitmap1_region);
 298   _next_mark_bit_map = &_mark_bit_map1;
 299 
 300   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 301   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 302   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 303   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 304   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 305 
 306   if (UseShenandoahMatrix) {
 307     _connection_matrix = new ShenandoahConnectionMatrix(_num_regions);
 308   } else {
 309     _connection_matrix = NULL;
 310   }
 311 
 312   _traversal_gc = heuristics()->can_do_traversal_gc() ?
 313                 new ShenandoahTraversalGC(this, _num_regions) :
 314                 NULL;
 315 
 316   _monitoring_support = new ShenandoahMonitoringSupport(this);
 317 
 318   _phase_timings = new ShenandoahPhaseTimings();
 319 
 320   if (ShenandoahAllocationTrace) {
 321     _alloc_tracker = new ShenandoahAllocTracker();
 322   }
 323 
 324   ShenandoahStringDedup::initialize();
 325 
 326   _control_thread = new ShenandoahControlThread();
 327 
 328   ShenandoahCodeRoots::initialize();
 329 
 330   LogTarget(Trace, gc, region) lt;
 331   if (lt.is_enabled()) {
 332     ResourceMark rm;
 333     LogStream ls(lt);
 334     log_trace(gc, region)("All Regions");
 335     print_heap_regions_on(&ls);
 336     log_trace(gc, region)("Free Regions");
 337     _free_set->print_on(&ls);
 338   }
 339 
 340   log_info(gc, init)("Safepointing mechanism: %s",
 341                      SafepointMechanism::uses_thread_local_poll() ? "thread-local poll" :
 342                      (SafepointMechanism::uses_global_page_poll() ? "global-page poll" : "unknown"));
 343 
 344   return JNI_OK;
 345 }
 346 
 347 void ShenandoahHeap::initialize_heuristics() {
 348   if (ShenandoahGCHeuristics != NULL) {
 349     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
 350       _heuristics = new ShenandoahAggressiveHeuristics();
 351     } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
 352       _heuristics = new ShenandoahStaticHeuristics();
 353     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 354       _heuristics = new ShenandoahAdaptiveHeuristics();
 355     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 356       _heuristics = new ShenandoahPassiveHeuristics();
 357     } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
 358       _heuristics = new ShenandoahCompactHeuristics();
 359     } else if (strcmp(ShenandoahGCHeuristics, "connected") == 0) {
 360       _heuristics = new ShenandoahPartialConnectedHeuristics();
 361     } else if (strcmp(ShenandoahGCHeuristics, "generational") == 0) {
 362       _heuristics = new ShenandoahPartialGenerationalHeuristics();
 363     } else if (strcmp(ShenandoahGCHeuristics, "LRU") == 0) {
 364       _heuristics = new ShenandoahPartialLRUHeuristics();
 365     } else if (strcmp(ShenandoahGCHeuristics, "traversal") == 0) {
 366       _heuristics = new ShenandoahTraversalHeuristics();
 367     } else {
 368       vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
 369     }
 370 
 371     if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 372       vm_exit_during_initialization(
 373               err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 374                       _heuristics->name()));
 375     }
 376     if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 377       vm_exit_during_initialization(
 378               err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 379                       _heuristics->name()));
 380     }
 381 
 382     if (ShenandoahStoreValEnqueueBarrier && ShenandoahStoreValReadBarrier) {
 383       vm_exit_during_initialization("Cannot use both ShenandoahStoreValEnqueueBarrier and ShenandoahStoreValReadBarrier");
 384     }
 385     log_info(gc, init)("Shenandoah heuristics: %s",
 386                        _heuristics->name());
 387     _heuristics->print_thresholds();
 388   } else {
 389       ShouldNotReachHere();
 390   }
 391 
 392 }
 393 
 394 #ifdef _MSC_VER
 395 #pragma warning( push )
 396 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 397 #endif
 398 
 399 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 400   CollectedHeap(),
 401   _shenandoah_policy(policy),
 402   _soft_ref_policy(),
 403   _regions(NULL),
 404   _free_set(NULL),
 405   _collection_set(NULL),
 406   _update_refs_iterator(this),
 407   _bytes_allocated_since_gc_start(0),
 408   _max_workers(MAX2(ConcGCThreads, ParallelGCThreads)),
 409   _ref_processor(NULL),
 410   _next_top_at_mark_starts(NULL),
 411   _next_top_at_mark_starts_base(NULL),
 412   _complete_top_at_mark_starts(NULL),
 413   _complete_top_at_mark_starts_base(NULL),
 414   _mark_bit_map0(),
 415   _mark_bit_map1(),
 416   _aux_bit_map(),
 417   _connection_matrix(NULL),
 418   _verifier(NULL),
 419   _pacer(NULL),
 420   _used_at_last_gc(0),
 421   _alloc_seq_at_last_gc_start(0),
 422   _alloc_seq_at_last_gc_end(0),
 423   _safepoint_workers(NULL),
 424   _gc_cycle_mode(),
 425 #ifdef ASSERT
 426   _heap_expansion_count(0),
 427 #endif
 428   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 429   _phase_timings(NULL),
 430   _alloc_tracker(NULL),
 431   _cycle_memory_manager("Shenandoah Cycles", "end of GC cycle"),
 432   _stw_memory_manager("Shenandoah Pauses", "end of GC pause"),
 433   _memory_pool(NULL)
 434 {
 435   log_info(gc, init)("Parallel GC threads: " UINT32_FORMAT, ParallelGCThreads);
 436   log_info(gc, init)("Concurrent GC threads: " UINT32_FORMAT, ConcGCThreads);
 437   log_info(gc, init)("Parallel reference processing enabled: %s", BOOL_TO_STR(ParallelRefProcEnabled));
 438 
 439   _scm = new ShenandoahConcurrentMark();
 440   _full_gc = new ShenandoahMarkCompact();
 441   _used = 0;
 442 
 443   _max_workers = MAX2(_max_workers, 1U);
 444   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 445                             /* are_GC_task_threads */true,
 446                             /* are_ConcurrentGC_threads */false);
 447   if (_workers == NULL) {
 448     vm_exit_during_initialization("Failed necessary allocation.");
 449   } else {
 450     _workers->initialize_workers();
 451   }
 452 
 453   if (ParallelSafepointCleanupThreads > 1) {
 454     _safepoint_workers = new ShenandoahWorkGang("Safepoint Cleanup Thread",
 455                                                 ParallelSafepointCleanupThreads,
 456                                                 false, false);
 457     _safepoint_workers->initialize_workers();
 458   }
 459 }
 460 
 461 #ifdef _MSC_VER
 462 #pragma warning( pop )
 463 #endif
 464 
 465 class ShenandoahResetNextBitmapTask : public AbstractGangTask {
 466 private:
 467   ShenandoahRegionIterator _regions;
 468 
 469 public:
 470   ShenandoahResetNextBitmapTask() :
 471     AbstractGangTask("Parallel Reset Bitmap Task") {}
 472 
 473   void work(uint worker_id) {
 474     ShenandoahHeapRegion* region = _regions.next();
 475     ShenandoahHeap* heap = ShenandoahHeap::heap();
 476     while (region != NULL) {
 477       if (heap->is_bitmap_slice_committed(region)) {
 478         HeapWord* bottom = region->bottom();
 479         HeapWord* top = heap->next_top_at_mark_start(region->bottom());
 480         if (top > bottom) {
 481           heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 482         }
 483         assert(heap->is_next_bitmap_clear_range(bottom, region->end()), "must be clear");
 484       }
 485       region = _regions.next();
 486     }
 487   }
 488 };
 489 
 490 void ShenandoahHeap::reset_next_mark_bitmap() {
 491   assert_gc_workers(_workers->active_workers());
 492 
 493   ShenandoahResetNextBitmapTask task;
 494   _workers->run_task(&task);
 495 }
 496 
 497 class ShenandoahResetNextBitmapTraversalTask : public AbstractGangTask {
 498 private:
 499   ShenandoahHeapRegionSetIterator& _regions;
 500 
 501 public:
 502   ShenandoahResetNextBitmapTraversalTask(ShenandoahHeapRegionSetIterator& regions) :
 503     AbstractGangTask("Parallel Reset Bitmap Task for Traversal"),
 504     _regions(regions) {}
 505 
 506   void work(uint worker_id) {
 507     ShenandoahHeap* heap = ShenandoahHeap::heap();
 508     ShenandoahHeapRegion* region = _regions.claim_next();
 509     while (region != NULL) {
 510       if (!region->is_trash()) {
 511         assert(!region->is_empty_uncommitted(), "sanity");
 512         assert(heap->is_bitmap_slice_committed(region), "sanity");
 513         HeapWord* bottom = region->bottom();
 514         HeapWord* top = heap->next_top_at_mark_start(bottom);
 515         assert(top <= region->top(),
 516                "TAMS must smaller/equals than top: TAMS: "PTR_FORMAT", top: "PTR_FORMAT,
 517                p2i(top), p2i(region->top()));
 518         if (top > bottom) {
 519           heap->complete_mark_bit_map()->copy_from(heap->next_mark_bit_map(), MemRegion(bottom, top));
 520           heap->set_complete_top_at_mark_start(bottom, top);
 521           heap->next_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 522           heap->set_next_top_at_mark_start(bottom, bottom);
 523         }
 524         assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()),
 525                "need clear next bitmap");
 526       }
 527       region = _regions.claim_next();
 528     }
 529   }
 530 };
 531 
 532 void ShenandoahHeap::reset_next_mark_bitmap_traversal() {
 533   assert_gc_workers(_workers->active_workers());
 534 
 535   ShenandoahHeapRegionSet* regions = traversal_gc()->traversal_set();
 536   ShenandoahHeapRegionSetIterator iter(regions);
 537   ShenandoahResetNextBitmapTraversalTask task(iter);
 538   _workers->run_task(&task);
 539 }
 540 
 541 bool ShenandoahHeap::is_next_bitmap_clear() {
 542   for (size_t idx = 0; idx < _num_regions; idx++) {
 543     ShenandoahHeapRegion* r = get_region(idx);
 544     if (is_bitmap_slice_committed(r) && !is_next_bitmap_clear_range(r->bottom(), r->end())) {
 545       return false;
 546     }
 547   }
 548   return true;
 549 }
 550 
 551 bool ShenandoahHeap::is_next_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 552   return _next_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 553 }
 554 
 555 bool ShenandoahHeap::is_complete_bitmap_clear_range(HeapWord* start, HeapWord* end) {
 556   return _complete_mark_bit_map->getNextMarkedWordAddress(start, end) == end;
 557 }
 558 
 559 void ShenandoahHeap::print_on(outputStream* st) const {
 560   st->print_cr("Shenandoah Heap");
 561   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 562                capacity() / K, committed() / K, used() / K);
 563   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
 564                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 565 
 566   st->print("Status: ");
 567   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 568   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 569   if (is_evacuation_in_progress())           st->print("evacuating, ");
 570   if (is_update_refs_in_progress())          st->print("updating refs, ");
 571   if (is_concurrent_traversal_in_progress()) st->print("traversal, ");
 572   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 573   if (is_full_gc_in_progress())              st->print("full gc, ");
 574   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 575 
 576   if (cancelled_gc()) {
 577     st->print("cancelled");
 578   } else {
 579     st->print("not cancelled");
 580   }
 581   st->cr();
 582 
 583   st->print_cr("Reserved region:");
 584   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 585                p2i(reserved_region().start()),
 586                p2i(reserved_region().end()));
 587 
 588   st->cr();
 589   MetaspaceUtils::print_on(st);
 590 
 591   if (UseShenandoahMatrix) {
 592     st->print_cr("Matrix:");
 593 
 594     ShenandoahConnectionMatrix* matrix = connection_matrix();
 595     if (matrix != NULL) {
 596       st->print_cr(" - base: " PTR_FORMAT, p2i(matrix->matrix_addr()));
 597       st->print_cr(" - stride: " SIZE_FORMAT, matrix->stride());
 598       st->print_cr(" - magic: " PTR_FORMAT, matrix->magic_offset());
 599     } else {
 600       st->print_cr(" No matrix.");
 601     }
 602   }
 603 
 604   if (Verbose) {
 605     print_heap_regions_on(st);
 606   }
 607 }
 608 
 609 class ShenandoahInitGCLABClosure : public ThreadClosure {
 610 public:
 611   void do_thread(Thread* thread) {
 612     if (thread != NULL && (thread->is_Java_thread() || thread->is_Worker_thread() ||
 613                            thread->is_ConcurrentGC_thread())) {
 614       ShenandoahThreadLocalData::initialize_gclab(thread);
 615     }
 616   }
 617 };
 618 
 619 void ShenandoahHeap::post_initialize() {
 620   CollectedHeap::post_initialize();
 621   MutexLocker ml(Threads_lock);
 622 
 623   ShenandoahInitGCLABClosure init_gclabs;
 624   Threads::threads_do(&init_gclabs);
 625   gc_threads_do(&init_gclabs);
 626 
 627   // gclab can not be initialized early during VM startup, as it can not determinate its max_size.
 628   // Now, we will let WorkGang to initialize gclab when new worker is created.
 629   _workers->set_initialize_gclab();
 630 
 631   _scm->initialize(_max_workers);
 632   _full_gc->initialize(_gc_timer);
 633 
 634   ref_processing_init();
 635 
 636   _heuristics->initialize();
 637 }
 638 
 639 size_t ShenandoahHeap::used() const {
 640   return OrderAccess::load_acquire(&_used);
 641 }
 642 
 643 size_t ShenandoahHeap::committed() const {
 644   OrderAccess::acquire();
 645   return _committed;
 646 }
 647 
 648 void ShenandoahHeap::increase_committed(size_t bytes) {
 649   assert_heaplock_or_safepoint();
 650   _committed += bytes;
 651 }
 652 
 653 void ShenandoahHeap::decrease_committed(size_t bytes) {
 654   assert_heaplock_or_safepoint();
 655   _committed -= bytes;
 656 }
 657 
 658 void ShenandoahHeap::increase_used(size_t bytes) {
 659   Atomic::add(bytes, &_used);
 660 }
 661 
 662 void ShenandoahHeap::set_used(size_t bytes) {
 663   OrderAccess::release_store_fence(&_used, bytes);
 664 }
 665 
 666 void ShenandoahHeap::decrease_used(size_t bytes) {
 667   assert(used() >= bytes, "never decrease heap size by more than we've left");
 668   Atomic::sub(bytes, &_used);
 669 }
 670 
 671 void ShenandoahHeap::increase_allocated(size_t bytes) {
 672   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 673 }
 674 
 675 void ShenandoahHeap::notify_alloc(size_t words, bool waste) {
 676   size_t bytes = words * HeapWordSize;
 677   if (!waste) {
 678     increase_used(bytes);
 679   }
 680   increase_allocated(bytes);
 681   if (ShenandoahPacing) {
 682     control_thread()->pacing_notify_alloc(words);
 683     if (waste) {
 684       pacer()->claim_for_alloc(words, true);
 685     }
 686   }
 687 }
 688 
 689 size_t ShenandoahHeap::capacity() const {
 690   return num_regions() * ShenandoahHeapRegion::region_size_bytes();
 691 }
 692 
 693 bool ShenandoahHeap::is_maximal_no_gc() const {
 694   Unimplemented();
 695   return true;
 696 }
 697 
 698 size_t ShenandoahHeap::max_capacity() const {
 699   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 700 }
 701 
 702 size_t ShenandoahHeap::initial_capacity() const {
 703   return _initial_size;
 704 }
 705 
 706 bool ShenandoahHeap::is_in(const void* p) const {
 707   HeapWord* heap_base = (HeapWord*) base();
 708   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 709   return p >= heap_base && p < last_region_end;
 710 }
 711 
 712 bool ShenandoahHeap::is_scavengable(oop p) {
 713   return true;
 714 }
 715 
 716 void ShenandoahHeap::op_uncommit(double shrink_before) {
 717   assert (ShenandoahUncommit, "should be enabled");
 718 
 719   size_t count = 0;
 720   for (size_t i = 0; i < num_regions(); i++) {
 721     ShenandoahHeapRegion* r = get_region(i);
 722     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 723       ShenandoahHeapLocker locker(lock());
 724       if (r->is_empty_committed()) {
 725         r->make_uncommitted();
 726         count++;
 727       }
 728     }
 729     SpinPause(); // allow allocators to take the lock
 730   }
 731 
 732   if (count > 0) {
 733     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 734                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 735     control_thread()->notify_heap_changed();
 736   }
 737 
 738   // Allocations happen during uncommits, record peak after the phase:
 739   heuristics()->record_peak_occupancy();
 740 }
 741 
 742 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 743   // New object should fit the GCLAB size
 744   size_t min_size = MAX2(size, PLAB::min_size());
 745 
 746   // Figure out size of new GCLAB, looking back at heuristics. Expand aggressively.
 747   size_t new_size = ShenandoahThreadLocalData::gclab_size(thread) * 2;
 748   new_size = MIN2(new_size, PLAB::max_size());
 749   new_size = MAX2(new_size, PLAB::min_size());
 750 
 751   // Record new heuristic value even if we take any shortcut. This captures
 752   // the case when moderately-sized objects always take a shortcut. At some point,
 753   // heuristics should catch up with them.
 754   ShenandoahThreadLocalData::set_gclab_size(thread, new_size);
 755 
 756   if (new_size < size) {
 757     // New size still does not fit the object. Fall back to shared allocation.
 758     // This avoids retiring perfectly good GCLABs, when we encounter a large object.
 759     return NULL;
 760   }
 761 
 762   // Retire current GCLAB, and allocate a new one.
 763   PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
 764   gclab->retire();
 765 
 766   size_t actual_size = 0;
 767   HeapWord* gclab_buf = allocate_new_gclab(min_size, new_size, &actual_size);
 768   if (gclab_buf == NULL) {
 769     return NULL;
 770   }
 771 
 772   assert (size <= actual_size, "allocation should fit");
 773 
 774   if (ZeroTLAB) {
 775     // ..and clear it.
 776     Copy::zero_to_words(gclab_buf, actual_size);
 777   } else {
 778     // ...and zap just allocated object.
 779 #ifdef ASSERT
 780     // Skip mangling the space corresponding to the object header to
 781     // ensure that the returned space is not considered parsable by
 782     // any concurrent GC thread.
 783     size_t hdr_size = oopDesc::header_size();
 784     Copy::fill_to_words(gclab_buf + hdr_size, actual_size - hdr_size, badHeapWordVal);
 785 #endif // ASSERT
 786   }
 787   gclab->set_buf(gclab_buf, actual_size);
 788   return gclab->allocate(size);
 789 }
 790 
 791 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t min_size,
 792                                             size_t requested_size,
 793                                             size_t* actual_size) {
 794 #ifdef ASSERT
 795   log_debug(gc, alloc)("Allocate new tlab, requested size = " SIZE_FORMAT " bytes", requested_size * HeapWordSize);
 796 #endif
 797   ShenandoahAllocationRequest req = ShenandoahAllocationRequest::for_tlab(min_size, requested_size);
 798   HeapWord* res = allocate_memory(req);
 799   if (res != NULL) {
 800     *actual_size = req.actual_size();
 801   } else {
 802     *actual_size = 0;
 803   }
 804   return res;
 805 }
 806 
 807 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 808                                              size_t word_size,
 809                                              size_t* actual_size) {
 810 #ifdef ASSERT
 811   log_debug(gc, alloc)("Allocate new gclab, requested size = " SIZE_FORMAT " bytes", word_size * HeapWordSize);
 812 #endif
 813   ShenandoahAllocationRequest req = ShenandoahAllocationRequest::for_gclab(min_size, word_size);
 814   HeapWord* res = allocate_memory(req);
 815   if (res != NULL) {
 816     *actual_size = req.actual_size();
 817   } else {
 818     *actual_size = 0;
 819   }
 820   return res;
 821 }
 822 
 823 ShenandoahHeap* ShenandoahHeap::heap() {
 824   CollectedHeap* heap = Universe::heap();
 825   assert(heap != NULL, "Unitialized access to ShenandoahHeap::heap()");
 826   assert(heap->kind() == CollectedHeap::Shenandoah, "not a shenandoah heap");
 827   return (ShenandoahHeap*) heap;
 828 }
 829 
 830 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
 831   CollectedHeap* heap = Universe::heap();
 832   return (ShenandoahHeap*) heap;
 833 }
 834 
 835 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocationRequest& req) {
 836   ShenandoahAllocTrace trace_alloc(req.size(), req.type());
 837 
 838   intptr_t pacer_epoch = 0;
 839   bool in_new_region = false;
 840   HeapWord* result = NULL;
 841 
 842   if (req.is_mutator_alloc()) {
 843     if (ShenandoahPacing) {
 844       pacer()->pace_for_alloc(req.size());
 845       pacer_epoch = pacer()->epoch();
 846     }
 847 
 848     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 849       result = allocate_memory_under_lock(req, in_new_region);
 850     }
 851 
 852     // Allocation failed, block until control thread reacted, then retry allocation.
 853     //
 854     // It might happen that one of the threads requesting allocation would unblock
 855     // way later after GC happened, only to fail the second allocation, because
 856     // other threads have already depleted the free storage. In this case, a better
 857     // strategy is to try again, as long as GC makes progress.
 858     //
 859     // Then, we need to make sure the allocation was retried after at least one
 860     // Full GC, which means we want to try more than ShenandoahFullGCThreshold times.
 861 
 862     size_t tries = 0;
 863 
 864     while (result == NULL && last_gc_made_progress()) {
 865       tries++;
 866       control_thread()->handle_alloc_failure(req.size());
 867       result = allocate_memory_under_lock(req, in_new_region);
 868     }
 869 
 870     while (result == NULL && tries <= ShenandoahFullGCThreshold) {
 871       tries++;
 872       control_thread()->handle_alloc_failure(req.size());
 873       result = allocate_memory_under_lock(req, in_new_region);
 874     }
 875 
 876   } else {
 877     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 878     result = allocate_memory_under_lock(req, in_new_region);
 879     // Do not call handle_alloc_failure() here, because we cannot block.
 880     // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
 881   }
 882 
 883   if (in_new_region) {
 884     control_thread()->notify_heap_changed();
 885   }
 886 
 887   if (result != NULL) {
 888     size_t requested = req.size();
 889     size_t actual = req.actual_size();
 890 
 891     assert (req.is_lab_alloc() || (requested == actual),
 892             "Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 893             alloc_type_to_string(req.type()), requested, actual);
 894 
 895     notify_alloc(actual, false);
 896 
 897     // If we requested more than we were granted, give the rest back to pacer.
 898     // This only matters if we are in the same pacing epoch: do not try to unpace
 899     // over the budget for the other phase.
 900     if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 901       pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 902     }
 903   }
 904 
 905   return result;
 906 }
 907 
 908 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocationRequest& req, bool& in_new_region) {
 909   ShenandoahHeapLocker locker(lock());
 910   return _free_set->allocate(req, in_new_region);
 911 }
 912 
 913 class ShenandoahObjAllocator : public ObjAllocator {
 914 public:
 915   ShenandoahObjAllocator(Klass* klass, size_t word_size, Thread* thread) :
 916     ObjAllocator(klass, word_size, thread) {}
 917 
 918   virtual HeapWord* mem_allocate(Allocation& allocation) {
 919     // Allocate object.
 920     _word_size += BrooksPointer::word_size();
 921     HeapWord* result = ObjAllocator::mem_allocate(allocation);
 922     _word_size -= BrooksPointer::word_size();
 923     // Initialize brooks-pointer
 924     if (result != NULL) {
 925       result += BrooksPointer::word_size();
 926       BrooksPointer::initialize(oop(result));
 927       assert(! ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targetted region");
 928     }
 929     return result;
 930   }
 931 };
 932 
 933 oop ShenandoahHeap::obj_allocate(Klass* klass, int size, TRAPS) {
 934   ShenandoahObjAllocator allocator(klass, size, THREAD);
 935   return allocator.allocate();
 936 }
 937 
 938 class ShenandoahObjArrayAllocator : public ObjArrayAllocator {
 939 public:
 940   ShenandoahObjArrayAllocator(Klass* klass, size_t word_size, int length, bool do_zero,
 941                               Thread* thread) :
 942     ObjArrayAllocator(klass, word_size, length, do_zero, thread) {}
 943 
 944   virtual HeapWord* mem_allocate(Allocation& allocation) {
 945     // Allocate object.
 946     _word_size += BrooksPointer::word_size();
 947     HeapWord* result = ObjArrayAllocator::mem_allocate(allocation);
 948     _word_size -= BrooksPointer::word_size();
 949     if (result != NULL) {
 950       result += BrooksPointer::word_size();
 951       BrooksPointer::initialize(oop(result));
 952       assert(! ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targetted region");
 953     }
 954     return result;
 955   }
 956 
 957 };
 958 
 959 oop ShenandoahHeap::array_allocate(Klass* klass, int size, int length, bool do_zero, TRAPS) {
 960   ShenandoahObjArrayAllocator allocator(klass, size, length, do_zero, THREAD);
 961   return allocator.allocate();
 962 }
 963 
 964 class ShenandoahClassAllocator : public ClassAllocator {
 965 public:
 966   ShenandoahClassAllocator(Klass* klass, size_t word_size, Thread* thread) :
 967     ClassAllocator(klass, word_size, thread) {}
 968 
 969   virtual HeapWord* mem_allocate(Allocation& allocation) {
 970     _word_size += BrooksPointer::word_size();
 971     HeapWord* result = ClassAllocator::mem_allocate(allocation);
 972     _word_size -= BrooksPointer::word_size();
 973     if (result != NULL) {
 974       result += BrooksPointer::word_size();
 975       BrooksPointer::initialize(oop(result));
 976       assert(! ShenandoahHeap::heap()->in_collection_set(result), "never allocate in targetted region");
 977     }
 978     return result;
 979   }
 980 
 981 };
 982 
 983 oop ShenandoahHeap::class_allocate(Klass* klass, int size, TRAPS) {
 984   ShenandoahClassAllocator allocator(klass, size, THREAD);
 985   return allocator.allocate();
 986 }
 987 
 988 HeapWord* ShenandoahHeap::mem_allocate(size_t size,
 989                                         bool*  gc_overhead_limit_was_exceeded) {
 990   ShenandoahAllocationRequest req = ShenandoahAllocationRequest::for_shared(size);
 991   return allocate_memory(req);
 992 }
 993 
 994 void ShenandoahHeap::fill_with_dummy_object(HeapWord* start, HeapWord* end, bool zap) {
 995   HeapWord* obj = tlab_post_allocation_setup(start);
 996   CollectedHeap::fill_with_object(obj, end);
 997 }
 998 
 999 class ShenandoahEvacuateUpdateRootsClosure: public BasicOopIterateClosure {
1000 private:
1001   ShenandoahHeap* _heap;
1002   Thread* _thread;
1003 public:
1004   ShenandoahEvacuateUpdateRootsClosure() :
1005     _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
1006   }
1007 
1008 private:
1009   template <class T>
1010   void do_oop_work(T* p) {
1011     assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
1012 
1013     T o = RawAccess<>::oop_load(p);
1014     if (! CompressedOops::is_null(o)) {
1015       oop obj = CompressedOops::decode_not_null(o);
1016       if (_heap->in_collection_set(obj)) {
1017         shenandoah_assert_marked_complete(p, obj);
1018         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1019         if (oopDesc::unsafe_equals(resolved, obj)) {
1020           resolved = _heap->evacuate_object(obj, _thread);
1021         }
1022         RawAccess<IS_NOT_NULL>::oop_store(p, resolved);
1023       }
1024     }
1025   }
1026 
1027 public:
1028   void do_oop(oop* p) {
1029     do_oop_work(p);
1030   }
1031   void do_oop(narrowOop* p) {
1032     do_oop_work(p);
1033   }
1034 };
1035 
1036 class ShenandoahEvacuateRootsClosure: public BasicOopIterateClosure {
1037 private:
1038   ShenandoahHeap* _heap;
1039   Thread* _thread;
1040 public:
1041   ShenandoahEvacuateRootsClosure() :
1042           _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
1043   }
1044 
1045 private:
1046   template <class T>
1047   void do_oop_work(T* p) {
1048     T o = RawAccess<>::oop_load(p);
1049     if (! CompressedOops::is_null(o)) {
1050       oop obj = CompressedOops::decode_not_null(o);
1051       if (_heap->in_collection_set(obj)) {
1052         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1053         if (oopDesc::unsafe_equals(resolved, obj)) {
1054           _heap->evacuate_object(obj, _thread);
1055         }
1056       }
1057     }
1058   }
1059 
1060 public:
1061   void do_oop(oop* p) {
1062     do_oop_work(p);
1063   }
1064   void do_oop(narrowOop* p) {
1065     do_oop_work(p);
1066   }
1067 };
1068 
1069 class ShenandoahParallelEvacuateRegionObjectClosure : public ObjectClosure {
1070 private:
1071   ShenandoahHeap* const _heap;
1072   Thread* const _thread;
1073 public:
1074   ShenandoahParallelEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
1075     _heap(heap), _thread(Thread::current()) {}
1076 
1077   void do_object(oop p) {
1078     shenandoah_assert_marked_complete(NULL, p);
1079     if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
1080       _heap->evacuate_object(p, _thread);
1081     }
1082   }
1083 };
1084 
1085 class ShenandoahParallelEvacuationTask : public AbstractGangTask {
1086 private:
1087   ShenandoahHeap* const _sh;
1088   ShenandoahCollectionSet* const _cs;
1089   ShenandoahSharedFlag _claimed_codecache;
1090 
1091 public:
1092   ShenandoahParallelEvacuationTask(ShenandoahHeap* sh,
1093                          ShenandoahCollectionSet* cs) :
1094     AbstractGangTask("Parallel Evacuation Task"),
1095     _cs(cs),
1096     _sh(sh)
1097   {}
1098 
1099   void work(uint worker_id) {
1100     ShenandoahWorkerSession worker_session(worker_id);
1101     ShenandoahEvacOOMScope oom_evac_scope;
1102     SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
1103 
1104     // If concurrent code cache evac is enabled, evacuate it here.
1105     // Note we cannot update the roots here, because we risk non-atomic stores to the alive
1106     // nmethods. The update would be handled elsewhere.
1107     if (ShenandoahConcurrentEvacCodeRoots && _claimed_codecache.try_set()) {
1108       ShenandoahEvacuateRootsClosure cl;
1109       MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
1110       CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
1111       CodeCache::blobs_do(&blobs);
1112     }
1113 
1114     ShenandoahParallelEvacuateRegionObjectClosure cl(_sh);
1115     ShenandoahHeapRegion* r;
1116     while ((r =_cs->claim_next()) != NULL) {
1117       log_develop_trace(gc, region)("Thread " INT32_FORMAT " claimed Heap Region " SIZE_FORMAT,
1118                                     worker_id,
1119                                     r->region_number());
1120 
1121       assert(r->has_live(), "all-garbage regions are reclaimed early");
1122       _sh->marked_object_iterate(r, &cl);
1123 
1124       if (ShenandoahPacing) {
1125         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
1126       }
1127 
1128       if (_sh->check_cancelled_gc_and_yield()) {
1129         log_develop_trace(gc, region)("Cancelled GC while evacuating region " SIZE_FORMAT, r->region_number());
1130         break;
1131       }
1132     }
1133   }
1134 };
1135 
1136 void ShenandoahHeap::trash_cset_regions() {
1137   ShenandoahHeapLocker locker(lock());
1138 
1139   ShenandoahCollectionSet* set = collection_set();
1140   ShenandoahHeapRegion* r;
1141   set->clear_current_index();
1142   while ((r = set->next()) != NULL) {
1143     r->make_trash();
1144   }
1145   collection_set()->clear();
1146 }
1147 
1148 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
1149   st->print_cr("Heap Regions:");
1150   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
1151   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
1152   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
1153   st->print_cr("SN=alloc sequence numbers (first mutator, last mutator, first gc, last gc)");
1154 
1155   for (size_t i = 0; i < num_regions(); i++) {
1156     get_region(i)->print_on(st);
1157   }
1158 }
1159 
1160 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
1161   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
1162 
1163   oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
1164   size_t size = humongous_obj->size() + BrooksPointer::word_size();
1165   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
1166   size_t index = start->region_number() + required_regions - 1;
1167 
1168   assert(!start->has_live(), "liveness must be zero");
1169   log_trace(gc, humongous)("Reclaiming " SIZE_FORMAT " humongous regions for object of size: " SIZE_FORMAT " words", required_regions, size);
1170 
1171   for(size_t i = 0; i < required_regions; i++) {
1172     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
1173     // as it expects that every region belongs to a humongous region starting with a humongous start region.
1174     ShenandoahHeapRegion* region = get_region(index --);
1175 
1176     LogTarget(Trace, gc, humongous) lt;
1177     if (lt.is_enabled()) {
1178       ResourceMark rm;
1179       LogStream ls(lt);
1180       region->print_on(&ls);
1181     }
1182 
1183     assert(region->is_humongous(), "expect correct humongous start or continuation");
1184     assert(!in_collection_set(region), "Humongous region should not be in collection set");
1185 
1186     region->make_trash();
1187   }
1188 }
1189 
1190 #ifdef ASSERT
1191 class ShenandoahCheckCollectionSetClosure: public ShenandoahHeapRegionClosure {
1192   bool heap_region_do(ShenandoahHeapRegion* r) {
1193     assert(! ShenandoahHeap::heap()->in_collection_set(r), "Should have been cleared by now");
1194     return false;
1195   }
1196 };
1197 #endif
1198 
1199 void ShenandoahHeap::prepare_for_concurrent_evacuation() {
1200   log_develop_trace(gc)("Thread %d started prepare_for_concurrent_evacuation", Thread::current()->osthread()->thread_id());
1201 
1202   if (!cancelled_gc()) {
1203     // Allocations might have happened before we STWed here, record peak:
1204     heuristics()->record_peak_occupancy();
1205 
1206     make_parsable(true);
1207 
1208     if (ShenandoahVerify) {
1209       verifier()->verify_after_concmark();
1210     }
1211 
1212     trash_cset_regions();
1213 
1214     // NOTE: This needs to be done during a stop the world pause, because
1215     // putting regions into the collection set concurrently with Java threads
1216     // will create a race. In particular, acmp could fail because when we
1217     // resolve the first operand, the containing region might not yet be in
1218     // the collection set, and thus return the original oop. When the 2nd
1219     // operand gets resolved, the region could be in the collection set
1220     // and the oop gets evacuated. If both operands have originally been
1221     // the same, we get false negatives.
1222 
1223     {
1224       ShenandoahHeapLocker locker(lock());
1225       _collection_set->clear();
1226       _free_set->clear();
1227 
1228 #ifdef ASSERT
1229       ShenandoahCheckCollectionSetClosure ccsc;
1230       heap_region_iterate(&ccsc);
1231 #endif
1232 
1233       heuristics()->choose_collection_set(_collection_set);
1234 
1235       _free_set->rebuild();
1236     }
1237 
1238     Universe::update_heap_info_at_gc();
1239 
1240     if (ShenandoahVerify) {
1241       verifier()->verify_before_evacuation();
1242     }
1243   }
1244 }
1245 
1246 
1247 class ShenandoahRetireTLABClosure : public ThreadClosure {
1248 private:
1249   bool _retire;
1250 
1251 public:
1252   ShenandoahRetireTLABClosure(bool retire) : _retire(retire) {}
1253 
1254   void do_thread(Thread* thread) {
1255     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1256     assert(gclab != NULL, "GCLAB should be initialized for %s", thread->name());
1257     gclab->retire();
1258   }
1259 };
1260 
1261 void ShenandoahHeap::make_parsable(bool retire_tlabs) {
1262   if (UseTLAB) {
1263     CollectedHeap::ensure_parsability(retire_tlabs);
1264   }
1265   ShenandoahRetireTLABClosure cl(retire_tlabs);
1266   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1267     cl.do_thread(t);
1268   }
1269   gc_threads_do(&cl);
1270 }
1271 
1272 
1273 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
1274   ShenandoahRootEvacuator* _rp;
1275 public:
1276 
1277   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
1278     AbstractGangTask("Shenandoah evacuate and update roots"),
1279     _rp(rp)
1280   {
1281     // Nothing else to do.
1282   }
1283 
1284   void work(uint worker_id) {
1285     ShenandoahWorkerSession worker_session(worker_id);
1286     ShenandoahEvacOOMScope oom_evac_scope;
1287     ShenandoahEvacuateUpdateRootsClosure cl;
1288 
1289     if (ShenandoahConcurrentEvacCodeRoots) {
1290       _rp->process_evacuate_roots(&cl, NULL, worker_id);
1291     } else {
1292       MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1293       _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1294     }
1295   }
1296 };
1297 
1298 class ShenandoahFixRootsTask : public AbstractGangTask {
1299   ShenandoahRootEvacuator* _rp;
1300 public:
1301 
1302   ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
1303     AbstractGangTask("Shenandoah update roots"),
1304     _rp(rp)
1305   {
1306     // Nothing else to do.
1307   }
1308 
1309   void work(uint worker_id) {
1310     ShenandoahWorkerSession worker_session(worker_id);
1311     ShenandoahEvacOOMScope oom_evac_scope;
1312     ShenandoahUpdateRefsClosure cl;
1313     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
1314 
1315     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
1316   }
1317 };
1318 
1319 void ShenandoahHeap::evacuate_and_update_roots() {
1320 
1321 #if defined(COMPILER2) || INCLUDE_JVMCI
1322   DerivedPointerTable::clear();
1323 #endif
1324   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1325 
1326   {
1327     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1328     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
1329     workers()->run_task(&roots_task);
1330   }
1331 
1332 #if defined(COMPILER2) || INCLUDE_JVMCI
1333   DerivedPointerTable::update_pointers();
1334 #endif
1335   if (cancelled_gc()) {
1336     fixup_roots();
1337   }
1338 }
1339 
1340 void ShenandoahHeap::fixup_roots() {
1341     assert(cancelled_gc(), "Only after concurrent cycle failed");
1342 
1343     // If initial evacuation has been cancelled, we need to update all references
1344     // after all workers have finished. Otherwise we might run into the following problem:
1345     // GC thread 1 cannot allocate anymore, thus evacuation fails, leaves from-space ptr of object X.
1346     // GC thread 2 evacuates the same object X to to-space
1347     // which leaves a truly dangling from-space reference in the first root oop*. This must not happen.
1348     // clear() and update_pointers() must always be called in pairs,
1349     // cannot nest with above clear()/update_pointers().
1350 #if defined(COMPILER2) || INCLUDE_JVMCI
1351     DerivedPointerTable::clear();
1352 #endif
1353     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
1354     ShenandoahFixRootsTask update_roots_task(&rp);
1355     workers()->run_task(&update_roots_task);
1356 #if defined(COMPILER2) || INCLUDE_JVMCI
1357     DerivedPointerTable::update_pointers();
1358 #endif
1359 }
1360 
1361 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
1362   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
1363 
1364   CodeBlobToOopClosure blobsCl(cl, false);
1365   CLDToOopClosure cldCl(cl);
1366 
1367   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1368   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
1369 }
1370 
1371 bool ShenandoahHeap::supports_tlab_allocation() const {
1372   return true;
1373 }
1374 
1375 // Returns size in bytes
1376 size_t ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
1377   if (ShenandoahElasticTLAB) {
1378     // With Elastic TLABs, return the max allowed size, and let the allocation path
1379     // figure out the safe size for current allocation.
1380     return ShenandoahHeapRegion::max_tlab_size_bytes();
1381   } else {
1382     return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
1383   }
1384 }
1385 
1386 size_t ShenandoahHeap::max_tlab_size() const {
1387   // Returns size in words
1388   return ShenandoahHeapRegion::max_tlab_size_words();
1389 }
1390 
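// Retires each thread's GCLAB and resets its recorded size, so the next cycle starts
// sizing GCLABs from scratch.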
1391 class ShenandoahRetireAndResetGCLABClosure : public ThreadClosure {
1392 public:
1393   void do_thread(Thread* thread) {
1394     PLAB* gclab = ShenandoahThreadLocalData::gclab(thread);
1395     gclab->retire();
1396     if (ShenandoahThreadLocalData::gclab_size(thread) > 0) {
1397       ShenandoahThreadLocalData::set_gclab_size(thread, 0);
1398     }
1399   }
1400 };
1401 
1402 void ShenandoahHeap::retire_and_reset_gclabs() {
1403   ShenandoahRetireAndResetGCLABClosure cl;
1404   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
1405     cl.do_thread(t);
1406   }
1407   gc_threads_do(&cl);
1408 }
1409 
bool ShenandoahHeap::can_elide_tlab_store_barriers() const {
1411   return true;
1412 }
1413 
1414 oop ShenandoahHeap::new_store_pre_barrier(JavaThread* thread, oop new_obj) {
1415   // Overridden to do nothing.
1416   return new_obj;
1417 }
1418 
bool ShenandoahHeap::can_elide_initializing_store_barrier(oop new_obj) {
1420   return true;
1421 }
1422 
1423 bool ShenandoahHeap::card_mark_must_follow_store() const {
1424   return false;
1425 }
1426 
1427 void ShenandoahHeap::collect(GCCause::Cause cause) {
1428   control_thread()->handle_explicit_gc(cause);
1429 }
1430 
1431 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1432   //assert(false, "Shouldn't need to do full collections");
1433 }
1434 
AdaptiveSizePolicy* ShenandoahHeap::size_policy() {
  Unimplemented();
  return NULL;
}
1440 
1441 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1442   return _shenandoah_policy;
1443 }
1444 
1445 
1446 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1447   Space* sp = heap_region_containing(addr);
1448   if (sp != NULL) {
1449     return sp->block_start(addr);
1450   }
1451   return NULL;
1452 }
1453 
1454 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1455   Space* sp = heap_region_containing(addr);
1456   assert(sp != NULL, "block_size of address outside of heap");
1457   return sp->block_size(addr);
1458 }
1459 
1460 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1461   Space* sp = heap_region_containing(addr);
1462   return sp->block_is_obj(addr);
1463 }
1464 
1465 jlong ShenandoahHeap::millis_since_last_gc() {
1466   return 0;
1467 }
1468 
1469 void ShenandoahHeap::prepare_for_verify() {
1470   if (SafepointSynchronize::is_at_safepoint() || ! UseTLAB) {
1471     make_parsable(false);
1472   }
1473 }
1474 
1475 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1476   workers()->print_worker_threads_on(st);
1477   if (ShenandoahStringDedup::is_enabled()) {
1478     ShenandoahStringDedup::print_worker_threads_on(st);
1479   }
1480 }
1481 
1482 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1483   workers()->threads_do(tcl);
1484   if (ShenandoahStringDedup::is_enabled()) {
1485     ShenandoahStringDedup::threads_do(tcl);
1486   }
1487 }
1488 
1489 void ShenandoahHeap::print_tracing_info() const {
1490   LogTarget(Info, gc, stats) lt;
1491   if (lt.is_enabled()) {
1492     ResourceMark rm;
1493     LogStream ls(lt);
1494 
1495     phase_timings()->print_on(&ls);
1496 
1497     ls.cr();
1498     ls.cr();
1499 
1500     shenandoahPolicy()->print_gc_stats(&ls);
1501 
1502     ls.cr();
1503     ls.cr();
1504 
1505     if (ShenandoahPacing) {
1506       pacer()->print_on(&ls);
1507     }
1508 
1509     ls.cr();
1510     ls.cr();
1511 
1512     if (ShenandoahAllocationTrace) {
1513       assert(alloc_tracker() != NULL, "Must be");
1514       alloc_tracker()->print_on(&ls);
1515     } else {
1516       ls.print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1517     }
1518   }
1519 }
1520 
1521 void ShenandoahHeap::verify(VerifyOption vo) {
1522   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1523     if (ShenandoahVerify) {
1524       verifier()->verify_generic(vo);
1525     } else {
1526       // TODO: Consider allocating verification bitmaps on demand,
1527       // and turn this on unconditionally.
1528     }
1529   }
1530 }

size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1532   return _free_set->capacity();
1533 }
1534 
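// Closure used by object_iterate() below: loads each referenced oop, resolves it
// through the forwarding pointer, and pushes objects not yet seen (tracked in the
// auxiliary bitmap) onto the traversal stack.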
1535 class ObjectIterateScanRootClosure : public BasicOopIterateClosure {
1536 private:
1537   MarkBitMap* _bitmap;
1538   Stack<oop,mtGC>* _oop_stack;
1539 
1540   template <class T>
1541   void do_oop_work(T* p) {
1542     T o = RawAccess<>::oop_load(p);
1543     if (!CompressedOops::is_null(o)) {
1544       oop obj = CompressedOops::decode_not_null(o);
1545       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1546       assert(oopDesc::is_oop(obj), "must be a valid oop");
1547       if (!_bitmap->isMarked((HeapWord*) obj)) {
1548         _bitmap->mark((HeapWord*) obj);
1549         _oop_stack->push(obj);
1550       }
1551     }
1552   }
1553 public:
1554   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1555     _bitmap(bitmap), _oop_stack(oop_stack) {}
1556   void do_oop(oop* p)       { do_oop_work(p); }
1557   void do_oop(narrowOop* p) { do_oop_work(p); }
1558 };
1559 
/*
 * This is public API, used in preparation of object_iterate().
 * Since we don't do a linear scan of the heap in object_iterate() (see comment below),
 * we don't need to make the heap parsable here. For Shenandoah-internal linear heap
 * scans that we can control, we call ShenandoahHeap::make_parsable().
 */
1566 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1567   // No-op.
1568 }
1569 
1570 /*
1571  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1572  *
 * We cannot safely iterate objects by doing a linear scan at arbitrary points in time. Linear
 * scanning needs to deal with dead objects, which may have dead Klass* pointers (so, e.g.,
 * calling oopDesc::size() would crash) or dangling reference fields, etc. Linear
 * scanning therefore depends on having a valid marking bitmap to support it. However, we only
 * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
 * marking bitmap during marking, after aborted marking, or during/after cleanup (when we just
 * wiped the bitmap in preparation for the next marking).
1580  *
1581  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1582  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1583  * is allowed to report dead objects, but is not required to do so.
1584  */
1585 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1586   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1587   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1588     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1589     return;
1590   }
1591 
1592   // Reset bitmap
1593   MemRegion mr = MemRegion(_aux_bit_map.startWord(), _aux_bit_map.endWord());
1594   _aux_bit_map.clear_range_large(mr);
1595 
1596   Stack<oop,mtGC> oop_stack;
1597 
1598   // First, we process all GC roots. This populates the work stack with initial objects.
1599   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1600   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1601   CLDToOopClosure clds(&oops, false);
1602   CodeBlobToOopClosure blobs(&oops, false);
1603   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1604 
1605   // Work through the oop stack to traverse heap.
1606   while (! oop_stack.is_empty()) {
1607     oop obj = oop_stack.pop();
1608     assert(oopDesc::is_oop(obj), "must be a valid oop");
1609     cl->do_object(obj);
1610     obj->oop_iterate(&oops);
1611   }
1612 
1613   assert(oop_stack.is_empty(), "should be empty");
1614 
1615   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1616     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1617   }
1618 }
1619 
1620 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1621   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1622   object_iterate(cl);
1623 }
1624 
1625 // Apply blk->heap_region_do() on all committed regions in address order,
1626 // terminating the iteration early if heap_region_do() returns true.
1627 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1628   for (size_t i = 0; i < num_regions(); i++) {
1629     ShenandoahHeapRegion* current  = get_region(i);
1630     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1631       continue;
1632     }
1633     if (skip_cset_regions && in_collection_set(current)) {
1634       continue;
1635     }
1636     if (blk->heap_region_do(current)) {
1637       return;
1638     }
1639   }
1640 }
1641 
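// Clears per-region liveness data and sets the next top-at-mark-start to the current
// top: everything already in the region has to be marked to be kept, while allocations
// past that point are implicitly live.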
1642 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1643 private:
1644   ShenandoahHeap* sh;
1645 public:
1646   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1647 
1648   bool heap_region_do(ShenandoahHeapRegion* r) {
1649     r->clear_live_data();
1650     sh->set_next_top_at_mark_start(r->bottom(), r->top());
1651     return false;
1652   }
1653 };
1654 
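// Init-mark safepoint operation: accounts and retires TLABs (so no objects escape the
// marking bitmap), resets per-region liveness, and scans the roots before the
// concurrent marking phase starts.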
1655 void ShenandoahHeap::op_init_mark() {
1656   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1657 
1658   assert(is_next_bitmap_clear(), "need clear marking bitmap");
1659 
1660   if (ShenandoahVerify) {
1661     verifier()->verify_before_concmark();
1662   }
1663 
1664   {
1665     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1666     accumulate_statistics_all_tlabs();
1667   }
1668 
1669   set_concurrent_mark_in_progress(true);
1670   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1671   {
1672     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1673     make_parsable(true);
1674   }
1675 
1676   {
1677     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1678     ShenandoahClearLivenessClosure clc(this);
1679     heap_region_iterate(&clc);
1680   }
1681 
1682   // Make above changes visible to worker threads
1683   OrderAccess::fence();
1684 
1685   concurrentMark()->init_mark_roots();
1686 
1687   if (UseTLAB) {
1688     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1689     resize_all_tlabs();
1690   }
1691 
1692   if (ShenandoahPacing) {
1693     pacer()->setup_for_mark();
1694   }
1695 }
1696 
1697 void ShenandoahHeap::op_mark() {
1698   concurrentMark()->mark_from_roots();
1699 
1700   // Allocations happen during concurrent mark, record peak after the phase:
1701   heuristics()->record_peak_occupancy();
1702 }
1703 
1704 void ShenandoahHeap::op_final_mark() {
1705   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1706 
  // It is critical that we evacuate roots right after finishing marking,
  // so that we don't get unmarked objects in the roots.
1710 
1711   if (!cancelled_gc()) {
1712     concurrentMark()->finish_mark_from_roots();
1713     stop_concurrent_marking();
1714 
1715     {
1716       ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
1717 
1718       // All allocations past TAMS are implicitly live, adjust the region data.
1719       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1720       for (size_t i = 0; i < num_regions(); i++) {
1721         ShenandoahHeapRegion* r = get_region(i);
1722         if (!r->is_active()) continue;
1723 
1724         HeapWord* tams = complete_top_at_mark_start(r->bottom());
1725         HeapWord* top = r->top();
1726         if (top > tams) {
1727           r->increase_live_data_alloc_words(pointer_delta(top, tams));
1728         }
1729       }
1730     }
1731 
1732     {
1733       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1734       prepare_for_concurrent_evacuation();
1735     }
1736 
1737     // If collection set has candidates, start evacuation.
1738     // Otherwise, bypass the rest of the cycle.
1739     if (!collection_set()->is_empty()) {
1740       set_evacuation_in_progress(true);
1741       // From here on, we need to update references.
1742       set_has_forwarded_objects(true);
1743 
1744       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1745       evacuate_and_update_roots();
1746     }
1747 
1748     if (ShenandoahPacing) {
1749       pacer()->setup_for_evac();
1750     }
1751   } else {
1752     concurrentMark()->cancel();
1753     stop_concurrent_marking();
1754 
1755     if (process_references()) {
1756       // Abandon reference processing right away: pre-cleaning must have failed.
1757       ReferenceProcessor *rp = ref_processor();
1758       rp->disable_discovery();
1759       rp->abandon_partial_discovery();
1760       rp->verify_no_references_recorded();
1761     }
1762   }
1763 }
1764 
1765 void ShenandoahHeap::op_final_evac() {
1766   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1767 
1768   set_evacuation_in_progress(false);
1769 
1770   retire_and_reset_gclabs();
1771 
1772   if (ShenandoahVerify) {
1773     verifier()->verify_after_evacuation();
1774   }
1775 }
1776 
void ShenandoahHeap::op_evac() {
1779   LogTarget(Trace, gc, region) lt_region;
1780   LogTarget(Trace, gc, cset) lt_cset;
1781 
1782   if (lt_region.is_enabled()) {
1783     ResourceMark rm;
1784     LogStream ls(lt_region);
1785     ls.print_cr("All available regions:");
1786     print_heap_regions_on(&ls);
1787   }
1788 
1789   if (lt_cset.is_enabled()) {
1790     ResourceMark rm;
1791     LogStream ls(lt_cset);
1792     ls.print_cr("Collection set (" SIZE_FORMAT " regions):", _collection_set->count());
1793     _collection_set->print_on(&ls);
1794 
1795     ls.print_cr("Free set:");
1796     _free_set->print_on(&ls);
1797   }
1798 
1799   ShenandoahParallelEvacuationTask task(this, _collection_set);
1800   workers()->run_task(&task);
1801 
1802   if (lt_cset.is_enabled()) {
1803     ResourceMark rm;
1804     LogStream ls(lt_cset);
1805     ls.print_cr("After evacuation collection set (" SIZE_FORMAT " regions):",
1806                 _collection_set->count());
1807     _collection_set->print_on(&ls);
1808 
1809     ls.print_cr("After evacuation free set:");
1810     _free_set->print_on(&ls);
1811   }
1812 
1813   if (lt_region.is_enabled()) {
1814     ResourceMark rm;
1815     LogStream ls(lt_region);
1816     ls.print_cr("All regions after evacuation:");
1817     print_heap_regions_on(&ls);
1818   }
1819 
1820   // Allocations happen during evacuation, record peak after the phase:
1821   heuristics()->record_peak_occupancy();
1822 }
1823 
1824 void ShenandoahHeap::op_updaterefs() {
1825   update_heap_references(true);
1826 
1827   // Allocations happen during update-refs, record peak after the phase:
1828   heuristics()->record_peak_occupancy();
1829 }
1830 
1831 void ShenandoahHeap::op_cleanup() {
1832   ShenandoahGCPhase phase_recycle(ShenandoahPhaseTimings::conc_cleanup_recycle);
1833   free_set()->recycle_trash();
1834 
1835   // Allocations happen during cleanup, record peak after the phase:
1836   heuristics()->record_peak_occupancy();
1837 }
1838 
1839 void ShenandoahHeap::op_cleanup_bitmaps() {
1840   op_cleanup();
1841 
1842   ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1843   reset_next_mark_bitmap();
1844 
1845   // Allocations happen during bitmap cleanup, record peak after the phase:
1846   heuristics()->record_peak_occupancy();
1847 }
1848 
void ShenandoahHeap::op_cleanup_traversal() {
1851   {
1852     ShenandoahGCPhase phase_reset(ShenandoahPhaseTimings::conc_cleanup_reset_bitmaps);
1853     reset_next_mark_bitmap_traversal();
1854   }
1855 
1856   op_cleanup();
1857 
1858   // Allocations happen during bitmap cleanup, record peak after the phase:
1859   heuristics()->record_peak_occupancy();
1860 }
1861 
1862 void ShenandoahHeap::op_preclean() {
1863   concurrentMark()->preclean_weak_refs();
1864 
1865   // Allocations happen during concurrent preclean, record peak after the phase:
1866   heuristics()->record_peak_occupancy();
1867 }
1868 
1869 void ShenandoahHeap::op_init_traversal() {
1870   traversal_gc()->init_traversal_collection();
1871 }
1872 
1873 void ShenandoahHeap::op_traversal() {
1874   traversal_gc()->concurrent_traversal_collection();
1875 }
1876 
1877 void ShenandoahHeap::op_final_traversal() {
1878   traversal_gc()->final_traversal_collection();
1879 }
1880 
1881 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1882   ShenandoahMetricsSnapshot metrics;
1883   metrics.snap_before();
1884 
1885   full_gc()->do_it(cause);
1886   if (UseTLAB) {
1887     ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_resize_tlabs);
1888     resize_all_tlabs();
1889   }
1890 
1891   metrics.snap_after();
1892   metrics.print();
1893 
1894   if (metrics.is_good_progress("Full GC")) {
1895     _progress_last_gc.set();
1896   } else {
1897     // Nothing to do. Tell the allocation path that we have failed to make
1898     // progress, and it can finally fail.
1899     _progress_last_gc.unset();
1900   }
1901 }
1902 
1903 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
  // Degenerated GC is STW, but it can also fail. The current mechanics communicate
  // GC failure via the cancelled_gc() flag. So, if we detect the failure after
  // some phase, we have to upgrade the Degenerated GC to Full GC.
1907 
1908   clear_cancelled_gc();
1909 
1910   ShenandoahMetricsSnapshot metrics;
1911   metrics.snap_before();
1912 
1913   switch (point) {
1914     case _degenerated_evac:
1915       // Not possible to degenerate from here, upgrade to Full GC right away.
1916       cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1917       op_degenerated_fail();
1918       return;
1919 
    // The cases below form a Duff's-device-like structure: they describe the actual GC
    // cycle, but enter it at different points, depending on which concurrent phase had
    // degenerated.
1923 
1924     case _degenerated_traversal:
1925       {
1926         ShenandoahHeapLocker locker(lock());
1927         collection_set()->clear_current_index();
1928         for (size_t i = 0; i < collection_set()->count(); i++) {
1929           ShenandoahHeapRegion* r = collection_set()->next();
1930           r->make_regular_bypass();
1931         }
1932         collection_set()->clear();
1933       }
1934       op_final_traversal();
1935       op_cleanup_traversal();
1936       return;
1937 
1938     case _degenerated_outside_cycle:
1939       if (heuristics()->can_do_traversal_gc()) {
1940         // Not possible to degenerate from here, upgrade to Full GC right away.
1941         cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1942         op_degenerated_fail();
1943         return;
1944       }
1945       op_init_mark();
1946       if (cancelled_gc()) {
1947         op_degenerated_fail();
1948         return;
1949       }
1950 
1951     case _degenerated_mark:
1952       op_final_mark();
1953       if (cancelled_gc()) {
1954         op_degenerated_fail();
1955         return;
1956       }
1957 
1958       op_cleanup();
1959 
      // If the heuristics decided we should do the cycle, this flag would be set,
      // and we can do evacuation. Otherwise, this is the shortcut cycle.
1962       if (is_evacuation_in_progress()) {
1963         op_evac();
1964         if (cancelled_gc()) {
1965           op_degenerated_fail();
1966           return;
1967         }
1968       }
1969 
      // If the heuristics decided we should do the cycle, this flag would be set,
      // and we need to do update-refs. Otherwise, this is the shortcut cycle.
1972       if (has_forwarded_objects()) {
1973         op_init_updaterefs();
1974         if (cancelled_gc()) {
1975           op_degenerated_fail();
1976           return;
1977         }
1978       }
1979 
1980     case _degenerated_updaterefs:
1981       if (has_forwarded_objects()) {
1982         op_final_updaterefs();
1983         if (cancelled_gc()) {
1984           op_degenerated_fail();
1985           return;
1986         }
1987       }
1988 
1989       op_cleanup_bitmaps();
1990       break;
1991 
1992     default:
1993       ShouldNotReachHere();
1994   }
1995 
1996   if (ShenandoahVerify) {
1997     verifier()->verify_after_degenerated();
1998   }
1999 
2000   metrics.snap_after();
2001   metrics.print();
2002 
2003   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
2004   // because that probably means the heap is overloaded and/or fragmented.
2005   if (!metrics.is_good_progress("Degenerated GC")) {
2006     _progress_last_gc.unset();
2007     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
2008     op_degenerated_futile();
2009   } else {
2010     _progress_last_gc.set();
2011   }
2012 }
2013 
2014 void ShenandoahHeap::op_degenerated_fail() {
2015   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
2016   shenandoahPolicy()->record_degenerated_upgrade_to_full();
2017   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
2018 }
2019 
2020 void ShenandoahHeap::op_degenerated_futile() {
2021   shenandoahPolicy()->record_degenerated_upgrade_to_full();
2022   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
2023 }
2024 
2025 void ShenandoahHeap::swap_mark_bitmaps() {
2026   // Swap bitmaps.
2027   MarkBitMap* tmp1 = _complete_mark_bit_map;
2028   _complete_mark_bit_map = _next_mark_bit_map;
2029   _next_mark_bit_map = tmp1;
2030 
2031   // Swap top-at-mark-start pointers
2032   HeapWord** tmp2 = _complete_top_at_mark_starts;
2033   _complete_top_at_mark_starts = _next_top_at_mark_starts;
2034   _next_top_at_mark_starts = tmp2;
2035 
2036   HeapWord** tmp3 = _complete_top_at_mark_starts_base;
2037   _complete_top_at_mark_starts_base = _next_top_at_mark_starts_base;
2038   _next_top_at_mark_starts_base = tmp3;
2039 }
2040 
2041 
2042 void ShenandoahHeap::stop_concurrent_marking() {
2043   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
2044   if (!cancelled_gc()) {
    // Marking finished without cancellation. Any reference updating that piggybacked
    // on this marking cycle is complete as well, so we can drop the
    // has-forwarded-objects flag and swap the marking bitmaps.
2047     set_has_forwarded_objects(false);
2048     swap_mark_bitmaps();
2049   }
2050   set_concurrent_mark_in_progress(false);
2051 
2052   LogTarget(Trace, gc, region) lt;
2053   if (lt.is_enabled()) {
2054     ResourceMark rm;
2055     LogStream ls(lt);
2056     ls.print_cr("Regions at stopping the concurrent mark:");
2057     print_heap_regions_on(&ls);
2058   }
2059 }
2060 
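// Requests that every Java thread flush its SATB buffer; the per-thread flag is polled
// by the threads themselves, hence only a releasing fence is needed here.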
2061 void ShenandoahHeap::force_satb_flush_all_threads() {
2062   if (!is_concurrent_mark_in_progress() && !is_concurrent_traversal_in_progress()) {
2063     // No need to flush SATBs
2064     return;
2065   }
2066 
2067   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2068     ShenandoahThreadLocalData::set_force_satb_flush(t, true);
2069   }
2070   // The threads are not "acquiring" their thread-local data, but it does not
2071   // hurt to "release" the updates here anyway.
2072   OrderAccess::fence();
2073 }
2074 
2075 void ShenandoahHeap::set_gc_state_all_threads(char state) {
2076   for (JavaThreadIteratorWithHandle jtiwh; JavaThread *t = jtiwh.next(); ) {
2077     ShenandoahThreadLocalData::set_gc_state(t, state);
2078   }
2079 }
2080 
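// Updates the global gc-state bitmask and mirrors the raw value into every Java
// thread's thread-local copy, which is what the barrier code consults.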
2081 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
2082   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
2083   _gc_state.set_cond(mask, value);
2084   set_gc_state_all_threads(_gc_state.raw_value());
2085 }
2086 
2087 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
2088   set_gc_state_mask(MARKING, in_progress);
2089   ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
2090 }
2091 
2092 void ShenandoahHeap::set_concurrent_traversal_in_progress(bool in_progress) {
  set_gc_state_mask(TRAVERSAL | HAS_FORWARDED, in_progress);
  ShenandoahBarrierSet::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
2095 }
2096 
2097 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
2098   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
2099   set_gc_state_mask(EVACUATION, in_progress);
2100 }
2101 
2102 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
2103   // Initialize Brooks pointer for the next object
2104   HeapWord* result = obj + BrooksPointer::word_size();
2105   BrooksPointer::initialize(oop(result));
2106   return result;
2107 }
2108 
2109 uint ShenandoahHeap::oop_extra_words() {
2110   return BrooksPointer::word_size();
2111 }
2112 
2113 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
2114   _heap(ShenandoahHeap::heap_no_check()) {
2115 }
2116 
2117 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
2118   _heap(ShenandoahHeap::heap_no_check()) {
2119 }
2120 
2121 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
2122   if (CompressedOops::is_null(obj)) {
2123     return false;
2124   }
2125   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
  shenandoah_assert_not_forwarded_if(NULL, obj, _heap->is_concurrent_mark_in_progress() || _heap->is_concurrent_traversal_in_progress());
2127   return _heap->is_marked_next(obj);
2128 }
2129 
2130 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
2131   if (CompressedOops::is_null(obj)) {
2132     return false;
2133   }
2134   shenandoah_assert_not_forwarded(NULL, obj);
2135   return _heap->is_marked_next(obj);
2136 }
2137 
2138 BoolObjectClosure* ShenandoahHeap::is_alive_closure() {
2139   return has_forwarded_objects() ?
2140          (BoolObjectClosure*) &_forwarded_is_alive :
2141          (BoolObjectClosure*) &_is_alive;
2142 }
2143 
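// Sets up the shared ReferenceProcessor: discovery is multi-threaded and non-atomic
// (it runs concurrently with mutators), and the is-alive closure is installed later,
// right before references are actually processed.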
2144 void ShenandoahHeap::ref_processing_init() {
2145   MemRegion mr = reserved_region();
2146 
2147   _forwarded_is_alive.init(this);
2148   _is_alive.init(this);
2149   assert(_max_workers > 0, "Sanity");
2150 
2151   _ref_processor =
2152     new ReferenceProcessor(&_subject_to_discovery,  // is_subject_to_discovery
2153                            ParallelRefProcEnabled,  // MT processing
2154                            _max_workers,            // Degree of MT processing
2155                            true,                    // MT discovery
2156                            _max_workers,            // Degree of MT discovery
2157                            false,                   // Reference discovery is not atomic
2158                            NULL);                   // No closure, should be installed before use
2159 
2160   shenandoah_assert_rp_isalive_not_installed();
2161 }
2162 
2163 
2164 GCTracer* ShenandoahHeap::tracer() {
2165   return shenandoahPolicy()->tracer();
2166 }
2167 
2168 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
2169   return _free_set->used();
2170 }
2171 
2172 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
2173   if (try_cancel_gc()) {
2174     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
2175     log_info(gc)("%s", msg.buffer());
2176     Events::log(Thread::current(), "%s", msg.buffer());
2177   }
2178 }
2179 
2180 uint ShenandoahHeap::max_workers() {
2181   return _max_workers;
2182 }
2183 
2184 void ShenandoahHeap::stop() {
  // The shutdown sequence should be able to terminate even when GC is running.
2186 
2187   // Step 0. Notify policy to disable event recording.
2188   _shenandoah_policy->record_shutdown();
2189 
2190   // Step 1. Notify control thread that we are in shutdown.
2191   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
2192   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
2193   control_thread()->prepare_for_graceful_shutdown();
2194 
2195   // Step 2. Notify GC workers that we are cancelling GC.
2196   cancel_gc(GCCause::_shenandoah_stop_vm);
2197 
  // Step 3. Wait until the control thread exits normally.
2199   control_thread()->stop();
2200 
2201   // Step 4. Stop String Dedup thread if it is active
2202   if (ShenandoahStringDedup::is_enabled()) {
2203     ShenandoahStringDedup::stop();
2204   }
2205 }
2206 
2207 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
2208   assert(ClassUnloading || full_gc, "Class unloading should be enabled");
2209 
2210   ShenandoahPhaseTimings::Phase phase_root =
2211           full_gc ?
2212           ShenandoahPhaseTimings::full_gc_purge :
2213           ShenandoahPhaseTimings::purge;
2214 
2215   ShenandoahPhaseTimings::Phase phase_unload =
2216           full_gc ?
2217           ShenandoahPhaseTimings::full_gc_purge_class_unload :
2218           ShenandoahPhaseTimings::purge_class_unload;
2219 
2220   ShenandoahPhaseTimings::Phase phase_cldg =
2221           full_gc ?
2222           ShenandoahPhaseTimings::full_gc_purge_cldg :
2223           ShenandoahPhaseTimings::purge_cldg;
2224 
2225   ShenandoahPhaseTimings::Phase phase_par =
2226           full_gc ?
2227           ShenandoahPhaseTimings::full_gc_purge_par :
2228           ShenandoahPhaseTimings::purge_par;
2229 
2230   ShenandoahPhaseTimings::Phase phase_par_classes =
2231           full_gc ?
2232           ShenandoahPhaseTimings::full_gc_purge_par_classes :
2233           ShenandoahPhaseTimings::purge_par_classes;
2234 
2235   ShenandoahPhaseTimings::Phase phase_par_codecache =
2236           full_gc ?
2237           ShenandoahPhaseTimings::full_gc_purge_par_codecache :
2238           ShenandoahPhaseTimings::purge_par_codecache;
2239 
2240   ShenandoahPhaseTimings::Phase phase_par_rmt =
2241           full_gc ?
2242           ShenandoahPhaseTimings::full_gc_purge_par_rmt :
2243           ShenandoahPhaseTimings::purge_par_rmt;
2244 
2245   ShenandoahPhaseTimings::Phase phase_par_symbstring =
2246           full_gc ?
2247           ShenandoahPhaseTimings::full_gc_purge_par_symbstring :
2248           ShenandoahPhaseTimings::purge_par_symbstring;
2249 
2250   ShenandoahPhaseTimings::Phase phase_par_sync =
2251           full_gc ?
2252           ShenandoahPhaseTimings::full_gc_purge_par_sync :
2253           ShenandoahPhaseTimings::purge_par_sync;
2254 
2255   ShenandoahGCPhase root_phase(phase_root);
2256 
2257   BoolObjectClosure* is_alive = is_alive_closure();
2258 
2259   bool purged_class;
2260 
2261   // Unload classes and purge SystemDictionary.
2262   {
2263     ShenandoahGCPhase phase(phase_unload);
2264     purged_class = SystemDictionary::do_unloading(gc_timer(),
2265                                                   full_gc /* do_cleaning*/ );
2266   }
2267 
2268   {
2269     ShenandoahGCPhase phase(phase_par);
2270     uint active = _workers->active_workers();
2271     ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
2272     _workers->run_task(&unlink_task);
2273 
2274     ShenandoahPhaseTimings* p = phase_timings();
2275     ParallelCleaningTimes times = unlink_task.times();
2276 
    // "times" reports total time summed across workers, while phase timings record
    // wall time. Divide the totals by the number of active workers to get the average
    // time per worker, which then adds up to the wall time.
2279     p->record_phase_time(phase_par_classes,    times.klass_work_us() / active);
2280     p->record_phase_time(phase_par_codecache,  times.codecache_work_us() / active);
2281     p->record_phase_time(phase_par_rmt,        times.rmt_work_us() / active);
2282     p->record_phase_time(phase_par_symbstring, times.tables_work_us() / active);
2283     p->record_phase_time(phase_par_sync,       times.sync_us() / active);
2284   }
2285 
2286   if (ShenandoahStringDedup::is_enabled()) {
2287     ShenandoahPhaseTimings::Phase phase_purge_dedup =
2288             full_gc ?
2289             ShenandoahPhaseTimings::full_gc_purge_string_dedup :
2290             ShenandoahPhaseTimings::purge_string_dedup;
2291     ShenandoahGCPhase phase(phase_purge_dedup);
2292     ShenandoahStringDedup::parallel_cleanup();
2293   }
2294 
2295   {
2296     ShenandoahGCPhase phase(phase_cldg);
2297     ClassLoaderDataGraph::purge();
2298   }
2299 }
2300 
2301 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
2302   set_gc_state_mask(HAS_FORWARDED, cond);
2303 }
2304 
2305 bool ShenandoahHeap::last_gc_made_progress() const {
2306   return _progress_last_gc.is_set();
2307 }
2308 
2309 void ShenandoahHeap::set_process_references(bool pr) {
2310   _process_references.set_cond(pr);
2311 }
2312 
2313 void ShenandoahHeap::set_unload_classes(bool uc) {
2314   _unload_classes.set_cond(uc);
2315 }
2316 
2317 bool ShenandoahHeap::process_references() const {
2318   return _process_references.is_set();
2319 }
2320 
2321 bool ShenandoahHeap::unload_classes() const {
2322   return _unload_classes.is_set();
2323 }
2324 
// FIXME: This should live in ShenandoahHeapRegionSet.
2326 ShenandoahHeapRegion* ShenandoahHeap::next_compaction_region(const ShenandoahHeapRegion* r) {
2327   size_t region_idx = r->region_number() + 1;
2328   ShenandoahHeapRegion* next = get_region(region_idx);
2329   guarantee(next->region_number() == region_idx, "region number must match");
2330   while (next->is_humongous()) {
2331     region_idx = next->region_number() + 1;
2332     next = get_region(region_idx);
2333     guarantee(next->region_number() == region_idx, "region number must match");
2334   }
2335   return next;
2336 }
2337 
2338 ShenandoahMonitoringSupport* ShenandoahHeap::monitoring_support() {
2339   return _monitoring_support;
2340 }
2341 
2342 MarkBitMap* ShenandoahHeap::complete_mark_bit_map() {
2343   return _complete_mark_bit_map;
2344 }
2345 
2346 MarkBitMap* ShenandoahHeap::next_mark_bit_map() {
2347   return _next_mark_bit_map;
2348 }
2349 
2350 address ShenandoahHeap::in_cset_fast_test_addr() {
2351   ShenandoahHeap* heap = ShenandoahHeap::heap();
2352   assert(heap->collection_set() != NULL, "Sanity");
2353   return (address) heap->collection_set()->biased_map_address();
2354 }
2355 
2356 address ShenandoahHeap::cancelled_gc_addr() {
2357   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
2358 }
2359 
2360 address ShenandoahHeap::gc_state_addr() {
2361   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
2362 }
2363 
2364 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
2365   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
2366 }
2367 
2368 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
2369   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
2370 }
2371 
2372 ShenandoahPacer* ShenandoahHeap::pacer() const {
2373   assert (_pacer != NULL, "sanity");
2374   return _pacer;
2375 }
2376 
2377 void ShenandoahHeap::set_next_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2378   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2379   _next_top_at_mark_starts[index] = addr;
2380 }
2381 
2382 HeapWord* ShenandoahHeap::next_top_at_mark_start(HeapWord* region_base) {
2383   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2384   return _next_top_at_mark_starts[index];
2385 }
2386 
2387 void ShenandoahHeap::set_complete_top_at_mark_start(HeapWord* region_base, HeapWord* addr) {
2388   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2389   _complete_top_at_mark_starts[index] = addr;
2390 }
2391 
2392 HeapWord* ShenandoahHeap::complete_top_at_mark_start(HeapWord* region_base) {
2393   uintx index = ((uintx) region_base) >> ShenandoahHeapRegion::region_size_bytes_shift();
2394   return _complete_top_at_mark_starts[index];
2395 }
2396 
2397 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
2398   _degenerated_gc_in_progress.set_cond(in_progress);
2399 }
2400 
2401 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
2402   _full_gc_in_progress.set_cond(in_progress);
2403 }
2404 
2405 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
2406   assert (is_full_gc_in_progress(), "should be");
2407   _full_gc_move_in_progress.set_cond(in_progress);
2408 }
2409 
2410 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
2411   set_gc_state_mask(UPDATEREFS, in_progress);
2412 }
2413 
2414 void ShenandoahHeap::register_nmethod(nmethod* nm) {
2415   ShenandoahCodeRoots::add_nmethod(nm);
2416 }
2417 
2418 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
2419   ShenandoahCodeRoots::remove_nmethod(nm);
2420 }
2421 
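// JNI object pinning: resolve the object through the barrier, then pin/unpin its
// containing region under the heap lock; pinned regions are not selected for
// evacuation.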
2422 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
2423   o = ShenandoahBarrierSet::barrier_set()->write_barrier(o);
2424   ShenandoahHeapLocker locker(lock());
2425   heap_region_containing(o)->make_pinned();
2426   return o;
2427 }
2428 
2429 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
2430   o = ShenandoahBarrierSet::barrier_set()->read_barrier(o);
2431   ShenandoahHeapLocker locker(lock());
2432   heap_region_containing(o)->make_unpinned();
2433 }
2434 
2435 GCTimer* ShenandoahHeap::gc_timer() const {
2436   return _gc_timer;
2437 }
2438 
2439 #ifdef ASSERT
2440 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
2441   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
2442 
2443   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
2444     if (UseDynamicNumberOfGCThreads ||
2445         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
2446       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
2447     } else {
2448       // Use ParallelGCThreads inside safepoints
      assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
2450     }
2451   } else {
2452     if (UseDynamicNumberOfGCThreads ||
2453         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
2454       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
2455     } else {
2456       // Use ConcGCThreads outside safepoints
2457       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
2458     }
2459   }
2460 }
2461 #endif
2462 
2463 ShenandoahConnectionMatrix* ShenandoahHeap::connection_matrix() const {
2464   return _connection_matrix;
2465 }
2466 
2467 ShenandoahTraversalGC* ShenandoahHeap::traversal_gc() {
2468   return _traversal_gc;
2469 }
2470 
2471 ShenandoahVerifier* ShenandoahHeap::verifier() {
2472   guarantee(ShenandoahVerify, "Should be enabled");
2473   assert (_verifier != NULL, "sanity");
2474   return _verifier;
2475 }
2476 
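// Walks heap regions through a shared iterator and updates references in marked
// objects of active regions outside the collection set. For collection-set regions it
// only clears the complete mark bitmap below top-at-mark-start, since their live
// objects have already been evacuated. T selects the update closure (plain or
// connection-matrix flavor).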
2477 template<class T>
2478 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
2479 private:
2480   T cl;
2481   ShenandoahHeap* _heap;
2482   ShenandoahRegionIterator* _regions;
2483   bool _concurrent;
2484 public:
2485   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
2486     AbstractGangTask("Concurrent Update References Task"),
2487     cl(T()),
2488     _heap(ShenandoahHeap::heap()),
2489     _regions(regions),
2490     _concurrent(concurrent) {
2491   }
2492 
2493   void work(uint worker_id) {
2494     ShenandoahWorkerSession worker_session(worker_id);
2495     SuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
2496     ShenandoahHeapRegion* r = _regions->next();
2497     while (r != NULL) {
2498       if (_heap->in_collection_set(r)) {
2499         HeapWord* bottom = r->bottom();
2500         HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
2501         if (top > bottom) {
2502           _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
2503         }
2504       } else {
2505         if (r->is_active()) {
2506           _heap->marked_object_oop_safe_iterate(r, &cl);
2507         }
2508       }
2509       if (ShenandoahPacing) {
2510         HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
2511         assert (top_at_start_ur >= r->bottom(), "sanity");
2512         _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
2513       }
2514       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
2515         return;
2516       }
2517       r = _regions->next();
2518     }
2519   }
2520 };
2521 
2522 void ShenandoahHeap::update_heap_references(bool concurrent) {
2523   if (UseShenandoahMatrix) {
2524     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsMatrixClosure> task(&_update_refs_iterator, concurrent);
2525     workers()->run_task(&task);
2526   } else {
2527     ShenandoahUpdateHeapRefsTask<ShenandoahUpdateHeapRefsClosure> task(&_update_refs_iterator, concurrent);
2528     workers()->run_task(&task);
2529   }
2530 }
2531 
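// Init-update-refs safepoint: evacuation is done, so retire GCLABs, make the heap
// parsable, record per-region safe iteration limits, and reset the region iterator
// before the concurrent update-refs phase begins.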
2532 void ShenandoahHeap::op_init_updaterefs() {
2533   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2534 
2535   set_evacuation_in_progress(false);
2536 
2537   retire_and_reset_gclabs();
2538 
2539   if (ShenandoahVerify) {
2540     verifier()->verify_before_updaterefs();
2541   }
2542 
2543   set_update_refs_in_progress(true);
2544   make_parsable(true);
2545   if (UseShenandoahMatrix) {
2546     connection_matrix()->clear_all();
2547   }
  for (size_t i = 0; i < num_regions(); i++) {
2549     ShenandoahHeapRegion* r = get_region(i);
2550     r->set_concurrent_iteration_safe_limit(r->top());
2551   }
2552 
2553   // Reset iterator.
2554   _update_refs_iterator.reset();
2555 
2556   if (ShenandoahPacing) {
2557     pacer()->setup_for_updaterefs();
2558   }
2559 }
2560 
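// Final-update-refs safepoint: finishes any leftover reference-update work, updates
// the roots, recycles collection set regions, and rebuilds the free set.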
2561 void ShenandoahHeap::op_final_updaterefs() {
2562   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
2563 
2564   // Check if there is left-over work, and finish it
2565   if (_update_refs_iterator.has_next()) {
2566     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
2567 
2568     // Finish updating references where we left off.
2569     clear_cancelled_gc();
2570     update_heap_references(false);
2571   }
2572 
  // Clear cancelled GC, if set. On the cancellation path, the block above has already
  // handled everything. On degenerated paths, cancelled GC would not be set anyway.
2575   if (cancelled_gc()) {
2576     clear_cancelled_gc();
2577   }
2578   assert(!cancelled_gc(), "Should have been done right before");
2579 
2580   concurrentMark()->update_roots(ShenandoahPhaseTimings::final_update_refs_roots);
2581 
2582   // Allocations might have happened before we STWed here, record peak:
2583   heuristics()->record_peak_occupancy();
2584 
2585   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2586 
2587   trash_cset_regions();
2588   set_has_forwarded_objects(false);
2589   set_update_refs_in_progress(false);
2590 
2591   if (ShenandoahVerify) {
2592     verifier()->verify_after_updaterefs();
2593   }
2594 
2595   {
2596     ShenandoahHeapLocker locker(lock());
2597     _free_set->rebuild();
2598   }
2599 }
2600 
2601 void ShenandoahHeap::set_alloc_seq_gc_start() {
2602   // Take next number, the start seq number is inclusive
2603   _alloc_seq_at_last_gc_start = ShenandoahHeapRegion::seqnum_current_alloc() + 1;
2604 }
2605 
2606 void ShenandoahHeap::set_alloc_seq_gc_end() {
2607   // Take current number, the end seq number is also inclusive
2608   _alloc_seq_at_last_gc_end = ShenandoahHeapRegion::seqnum_current_alloc();
2609 }
2610 
2611 
2612 #ifdef ASSERT
2613 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2614   _lock.assert_owned_by_current_thread();
2615 }
2616 
2617 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2618   _lock.assert_not_owned_by_current_thread();
2619 }
2620 
2621 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2622   _lock.assert_owned_by_current_thread_or_safepoint();
2623 }
2624 #endif
2625 
2626 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2627   print_on(st);
2628   print_heap_regions_on(st);
2629 }
2630 
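// The marking bitmaps are committed in slices, each covering _bitmap_regions_per_slice
// heap regions. For example, with 8 regions per slice, regions 0..7 would map to slice 0
// and share one committed range. Returns true if any region in r's slice, optionally
// excluding r itself, is committed.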
2631 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2632   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2633 
2634   size_t regions_from = _bitmap_regions_per_slice * slice;
2635   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2636   for (size_t g = regions_from; g < regions_to; g++) {
2637     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2638     if (skip_self && g == r->region_number()) continue;
2639     if (get_region(g)->is_committed()) {
2640       return true;
2641     }
2642   }
2643   return false;
2644 }
2645 
2646 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2647   assert_heaplock_owned_by_current_thread();
2648 
2649   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is already committed, meaning the bitmap
    // slice is already committed, so we exit right away.
2652     return true;
2653   }
2654 
2655   // Commit the bitmap slice:
2656   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2657   size_t off = _bitmap_bytes_per_slice * slice;
2658   size_t len = _bitmap_bytes_per_slice;
2659   if (!os::commit_memory((char*)_bitmap0_region.start() + off, len, false)) {
2660     return false;
2661   }
2662   if (!os::commit_memory((char*)_bitmap1_region.start() + off, len, false)) {
2663     return false;
2664   }
2665   return true;
2666 }
2667 
2668 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2669   assert_heaplock_owned_by_current_thread();
2670 
2671   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2674     return true;
2675   }
2676 
2677   // Uncommit the bitmap slice:
2678   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2679   size_t off = _bitmap_bytes_per_slice * slice;
2680   size_t len = _bitmap_bytes_per_slice;
2681   if (!os::uncommit_memory((char*)_bitmap0_region.start() + off, len)) {
2682     return false;
2683   }
2684   if (!os::uncommit_memory((char*)_bitmap1_region.start() + off, len)) {
2685     return false;
2686   }
2687   return true;
2688 }
2689 
2690 bool ShenandoahHeap::idle_bitmap_slice(ShenandoahHeapRegion *r) {
2691   assert_heaplock_owned_by_current_thread();
2692   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2693 
2694   if (is_bitmap_slice_committed(r, true)) {
    // Some other region from the group is still committed, meaning the bitmap
    // slice should stay committed; exit right away.
2697     return true;
2698   }
2699 
2700   // Idle the bitmap slice:
2701   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2702   size_t off = _bitmap_bytes_per_slice * slice;
2703   size_t len = _bitmap_bytes_per_slice;
2704   if (!os::idle_memory((char*)_bitmap0_region.start() + off, len)) {
2705     return false;
2706   }
2707   if (!os::idle_memory((char*)_bitmap1_region.start() + off, len)) {
2708     return false;
2709   }
2710   return true;
2711 }
2712 
2713 void ShenandoahHeap::activate_bitmap_slice(ShenandoahHeapRegion* r) {
2714   assert_heaplock_owned_by_current_thread();
2715   assert(ShenandoahUncommitWithIdle, "Must be enabled");
2716   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2717   size_t off = _bitmap_bytes_per_slice * slice;
2718   size_t len = _bitmap_bytes_per_slice;
2719   os::activate_memory((char*)_bitmap0_region.start() + off, len);
2720   os::activate_memory((char*)_bitmap1_region.start() + off, len);
2721 }
2722 
2723 void ShenandoahHeap::safepoint_synchronize_begin() {
2724   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2725     SuspendibleThreadSet::synchronize();
2726   }
2727 }
2728 
2729 void ShenandoahHeap::safepoint_synchronize_end() {
2730   if (ShenandoahSuspendibleWorkers || UseStringDeduplication) {
2731     SuspendibleThreadSet::desynchronize();
2732   }
2733 }
2734 
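// GC pause entry points. The vmop_entry_* methods run on the requesting thread and
// submit a VM operation; that operation calls the matching entry_* method inside the
// safepoint, which sets up timing, logging and worker scopes before invoking op_*.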
2735 void ShenandoahHeap::vmop_entry_init_mark() {
2736   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2737   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2738   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2739 
2740   try_inject_alloc_failure();
2741   VM_ShenandoahInitMark op;
2742   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2743 }
2744 
2745 void ShenandoahHeap::vmop_entry_final_mark() {
2746   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2747   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2748   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2749 
2750   try_inject_alloc_failure();
2751   VM_ShenandoahFinalMarkStartEvac op;
2752   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2753 }
2754 
2755 void ShenandoahHeap::vmop_entry_final_evac() {
2756   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2757   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2758   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2759 
2760   VM_ShenandoahFinalEvac op;
2761   VMThread::execute(&op); // jump to entry_final_evac under safepoint
2762 }
2763 
2764 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2765   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2766   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2767   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2768 
2769   try_inject_alloc_failure();
2770   VM_ShenandoahInitUpdateRefs op;
2771   VMThread::execute(&op);
2772 }
2773 
2774 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2775   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2776   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2777   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2778 
2779   try_inject_alloc_failure();
2780   VM_ShenandoahFinalUpdateRefs op;
2781   VMThread::execute(&op);
2782 }
2783 
2784 void ShenandoahHeap::vmop_entry_init_traversal() {
2785   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2786   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2787   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc_gross);
2788 
2789   try_inject_alloc_failure();
2790   VM_ShenandoahInitTraversalGC op;
2791   VMThread::execute(&op);
2792 }
2793 
2794 void ShenandoahHeap::vmop_entry_final_traversal() {
2795   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2796   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2797   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc_gross);
2798 
2799   try_inject_alloc_failure();
2800   VM_ShenandoahFinalTraversalGC op;
2801   VMThread::execute(&op);
2802 }
2803 
2804 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2805   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2806   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2807   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2808 
2809   try_inject_alloc_failure();
2810   VM_ShenandoahFullGC op(cause);
2811   VMThread::execute(&op);
2812 }
2813 
2814 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2815   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2816   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2817   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2818 
2819   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2820   VMThread::execute(&degenerated_gc);
2821 }
2822 
2823 void ShenandoahHeap::entry_init_mark() {
2824   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2825   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2826 
2827   FormatBuffer<> msg("Pause Init Mark%s%s%s",
2828                      has_forwarded_objects() ? " (update refs)"    : "",
2829                      process_references() ?    " (process refs)"   : "",
2830                      unload_classes() ?        " (unload classes)" : "");
2831   GCTraceTime(Info, gc) time(msg, gc_timer());
2832   EventMark em("%s", msg.buffer());
2833 
2834   ShenandoahWorkerScope scope(workers(),
2835                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2836                               "init marking");
2837 
2838   op_init_mark();
2839 }
2840 
2841 void ShenandoahHeap::entry_final_mark() {
2842   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2843   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2844 
2845   FormatBuffer<> msg("Pause Final Mark%s%s%s",
2846                      has_forwarded_objects() ? " (update refs)"    : "",
2847                      process_references() ?    " (process refs)"   : "",
2848                      unload_classes() ?        " (unload classes)" : "");
2849   GCTraceTime(Info, gc) time(msg, gc_timer());
2850   EventMark em("%s", msg.buffer());
2851 
2852   ShenandoahWorkerScope scope(workers(),
2853                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2854                               "final marking");
2855 
2856   op_final_mark();
2857 }
2858 
2859 void ShenandoahHeap::entry_final_evac() {
2860   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2861   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2862 
2863   FormatBuffer<> msg("Pause Final Evac");
2864   GCTraceTime(Info, gc) time(msg, gc_timer());
2865   EventMark em("%s", msg.buffer());
2866 
2867   op_final_evac();
2868 }
2869 
2870 void ShenandoahHeap::entry_init_updaterefs() {
2871   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2872   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2873 
2874   static const char* msg = "Pause Init Update Refs";
2875   GCTraceTime(Info, gc) time(msg, gc_timer());
2876   EventMark em("%s", msg);
2877 
2878   // No workers used in this phase, no setup required
2879 
2880   op_init_updaterefs();
2881 }
2882 
2883 void ShenandoahHeap::entry_final_updaterefs() {
2884   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2885   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2886 
2887   static const char* msg = "Pause Final Update Refs";
2888   GCTraceTime(Info, gc) time(msg, gc_timer());
2889   EventMark em("%s", msg);
2890 
2891   ShenandoahWorkerScope scope(workers(),
2892                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2893                               "final reference update");
2894 
2895   op_final_updaterefs();
2896 }
2897 
2898 void ShenandoahHeap::entry_init_traversal() {
2899   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2900   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_traversal_gc);
2901 
2902   static const char* msg = "Pause Init Traversal";
2903   GCTraceTime(Info, gc) time(msg, gc_timer());
2904   EventMark em("%s", msg);
2905 
2906   ShenandoahWorkerScope scope(workers(),
2907                               ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
2908                               "init traversal");
2909 
2910   op_init_traversal();
2911 }
2912 
2913 void ShenandoahHeap::entry_final_traversal() {
2914   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2915   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_traversal_gc);
2916 
2917   static const char* msg = "Pause Final Traversal";
2918   GCTraceTime(Info, gc) time(msg, gc_timer());
2919   EventMark em("%s", msg);
2920 
2921   ShenandoahWorkerScope scope(workers(),
2922                               ShenandoahWorkerPolicy::calc_workers_for_stw_traversal(),
2923                               "final traversal");
2924 
2925   op_final_traversal();
2926 }
2927 
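// Full GC pause: stop-the-world collection of the entire heap; the triggering cause is
// recorded in the GC trace and passed on to op_full().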
2928 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2929   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2930   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2931 
2932   static const char* msg = "Pause Full";
2933   GCTraceTime(Info, gc) time(msg, gc_timer(), cause, true);
2934   EventMark em("%s", msg);
2935 
2936   ShenandoahWorkerScope scope(workers(),
2937                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2938                               "full gc");
2939 
2940   op_full(cause);
2941 }
2942 
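// Degenerated GC: a stop-the-world continuation of a concurrent cycle that could not
// complete concurrently; 'point' records how far the concurrent cycle had progressed
// before bailing out.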
2943 void ShenandoahHeap::entry_degenerated(int point) {
2944   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2945   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2946 
2947   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2948   FormatBuffer<> msg("Pause Degenerated GC (%s)", degen_point_to_string(dpoint));
2949   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2950   EventMark em("%s", msg.buffer());
2951 
2952   ShenandoahWorkerScope scope(workers(),
2953                               ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
2954                               "stw degenerated gc");
2955 
2956   set_degenerated_gc_in_progress(true);
2957   op_degenerated(dpoint);
2958   set_degenerated_gc_in_progress(false);
2959 }
2960 
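// Concurrent marking. The log message is annotated with the cycle's update-refs,
// reference-processing and class-unloading settings, and an allocation failure may be
// injected first when ShenandoahAllocFailureALot testing is active.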
2961 void ShenandoahHeap::entry_mark() {
2962   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2963 
2964   FormatBuffer<> msg("Concurrent marking%s%s%s",
2965                      has_forwarded_objects() ? " (update refs)"    : "",
2966                      process_references() ?    " (process refs)"   : "",
2967                      unload_classes() ?        " (unload classes)" : "");
2968   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2969   EventMark em("%s", msg.buffer());
2970 
2971   ShenandoahWorkerScope scope(workers(),
2972                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2973                               "concurrent marking");
2974 
2975   try_inject_alloc_failure();
2976   op_mark();
2977 }
2978 
2979 void ShenandoahHeap::entry_evac() {
2980   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2981   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2982 
2983   static const char* msg = "Concurrent evacuation";
2984   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
2985   EventMark em("%s", msg);
2986 
2987   ShenandoahWorkerScope scope(workers(),
2988                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
2989                               "concurrent evacuation");
2990 
2991   try_inject_alloc_failure();
2992   op_evac();
2993 }
2994 
2995 void ShenandoahHeap::entry_updaterefs() {
2996   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2997 
2998   static const char* msg = "Concurrent update references";
2999   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
3000   EventMark em("%s", msg);
3001 
3002   ShenandoahWorkerScope scope(workers(),
3003                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
3004                               "concurrent reference update");
3005 
3006   try_inject_alloc_failure();
3007   op_updaterefs();
3008 }

3009 void ShenandoahHeap::entry_cleanup() {
3010   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
3011 
3012   static const char* msg = "Concurrent cleanup";
3013   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
3014   EventMark em("%s", msg);
3015 
3016   // This phase does not use workers, no need for setup
3017 
3018   try_inject_alloc_failure();
3019   op_cleanup();
3020 }
3021 
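// The traversal and bitmap variants below reuse the same phase id and "Concurrent cleanup"
// log message as entry_cleanup() above, but size their worker scopes with the traversal and
// cleanup policies and delegate to op_cleanup_traversal() / op_cleanup_bitmaps() respectively.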
3022 void ShenandoahHeap::entry_cleanup_traversal() {
3023   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
3024 
3025   static const char* msg = "Concurrent cleanup";
3026   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
3027   EventMark em("%s", msg);
3028 
3029   ShenandoahWorkerScope scope(workers(),
3030                               ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
3031                               "concurrent traversal cleanup");
3032 
3033   try_inject_alloc_failure();
3034   op_cleanup_traversal();
3035 }
3036 
3037 void ShenandoahHeap::entry_cleanup_bitmaps() {
3038   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
3039 
3040   static const char* msg = "Concurrent cleanup";
3041   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
3042   EventMark em("%s", msg);
3043 
3044   ShenandoahWorkerScope scope(workers(),
3045                               ShenandoahWorkerPolicy::calc_workers_for_conc_cleanup(),
3046                               "concurrent cleanup");
3047 
3048   try_inject_alloc_failure();
3049   op_cleanup_bitmaps();
3050 }
3051 
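// Concurrent precleaning runs only when ShenandoahPreclean is enabled and this cycle
// processes references; it works through discovered references concurrently, leaving less
// reference-processing work for the final mark pause.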
3052 void ShenandoahHeap::entry_preclean() {
3053   if (ShenandoahPreclean && process_references()) {
3054     static const char* msg = "Concurrent precleaning";
3055     GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
3056     EventMark em("%s", msg);
3057 
3058     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
3059 
3060     ShenandoahWorkerScope scope(workers(),
3061                                 ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
3062                                 "concurrent preclean");
3063 
3064     try_inject_alloc_failure();
3065     op_preclean();
3066   }
3067 }
3068 
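// Concurrent traversal cycle (single-pass mark/evacuate/update-refs mode). Minor cycles are
// accounted against the partial-collection counters, major cycles against the
// concurrent-collection counters.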
3069 void ShenandoahHeap::entry_traversal() {
3070   static const char* msg = "Concurrent traversal";
3071   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
3072   EventMark em("%s", msg);
3073 
3074   TraceCollectorStats tcs(is_minor_gc() ? monitoring_support()->partial_collection_counters()
3075                                         : monitoring_support()->concurrent_collection_counters());
3076 
3077   ShenandoahWorkerScope scope(workers(),
3078                               ShenandoahWorkerPolicy::calc_workers_for_conc_traversal(),
3079                               "concurrent traversal");
3080 
3081   try_inject_alloc_failure();
3082   op_traversal();
3083 }
3084 
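// Concurrent uncommit: hands back to the OS the memory of regions that have been empty
// since before the 'shrink_before' timestamp.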
3085 void ShenandoahHeap::entry_uncommit(double shrink_before) {
3086   static const char *msg = "Concurrent uncommit";
3087   GCTraceTime(Info, gc) time(msg, gc_timer(), GCCause::_no_gc, true);
3088   EventMark em("%s", msg);
3089 
3090   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);
3091 
3092   op_uncommit(shrink_before);
3093 }
3094 
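// Testing aid: when ShenandoahAllocFailureALot is enabled, roughly 5% of calls
// (os::random() % 1000 > 950) arm the injection flag and sleep briefly so an allocating
// thread has a chance to trip over it; the follow-up check logs whether the injection
// actually cancelled the GC.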
3095 void ShenandoahHeap::try_inject_alloc_failure() {
3096   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
3097     _inject_alloc_failure.set();
3098     os::naked_short_sleep(1);
3099     if (cancelled_gc()) {
3100       log_info(gc)("Allocation failure was successfully injected");
3101     }
3102   }
3103 }
3104 
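// Consume the armed flag with a test-and-unset, so at most one allocation observes each
// injected failure.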
3105 bool ShenandoahHeap::should_inject_alloc_failure() {
3106   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
3107 }
3108 
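// Serviceability: a single memory pool spanning the whole heap is registered with both the
// concurrent-cycle and the stop-the-world memory managers.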
3109 void ShenandoahHeap::initialize_serviceability() {
3110   _memory_pool = new ShenandoahMemoryPool(this);
3111   _cycle_memory_manager.add_pool(_memory_pool);
3112   _stw_memory_manager.add_pool(_memory_pool);
3113 }
3114 
3115 GrowableArray<GCMemoryManager*> ShenandoahHeap::memory_managers() {
3116   GrowableArray<GCMemoryManager*> memory_managers(2);
3117   memory_managers.append(&_cycle_memory_manager);
3118   memory_managers.append(&_stw_memory_manager);
3119   return memory_managers;
3120 }
3121 
3122 GrowableArray<MemoryPool*> ShenandoahHeap::memory_pools() {
3123   GrowableArray<MemoryPool*> memory_pools(1);
3124   memory_pools.append(_memory_pool);
3125   return memory_pools;
3126 }
3127 
3128 void ShenandoahHeap::enter_evacuation() {
3129   _oom_evac_handler.enter_evacuation();
3130 }
3131 
3132 void ShenandoahHeap::leave_evacuation() {
3133   _oom_evac_handler.leave_evacuation();
3134 }
3135 
3136 SoftRefPolicy* ShenandoahHeap::soft_ref_policy() {
3137   return &_soft_ref_policy;
3138 }
3139 
3140 ShenandoahRegionIterator::ShenandoahRegionIterator() :
3141   _index(0),
3142   _heap(ShenandoahHeap::heap()) {}
3143 
3144 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
3145   _index(0),
3146   _heap(heap) {}
3147 
3148 void ShenandoahRegionIterator::reset() {
3149   _index = 0;
3150 }
3151 
3152 bool ShenandoahRegionIterator::has_next() const {
3153   return _index < _heap->num_regions();
3154 }
3155 
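// Walk all heap regions in index order; the closure can terminate the walk early by
// returning true from heap_region_do().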
3156 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure& cl) const {
3157   ShenandoahRegionIterator regions;
3158   ShenandoahHeapRegion* r = regions.next();
3159   while (r != NULL) {
3160     if (cl.heap_region_do(r)) {
3161       break;
3162     }
3163     r = regions.next();
3164   }
3165 }
3166 
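// Cycle mode accessors; entry_traversal() above uses is_minor_gc() to decide between the
// partial- and concurrent-collection counters.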
3167 bool ShenandoahHeap::is_minor_gc() const {
3168   return _gc_cycle_mode.get() == MINOR;
3169 }
3170 
3171 bool ShenandoahHeap::is_major_gc() const {
3172   return _gc_cycle_mode.get() == MAJOR;
3173 }
3174 
3175 void ShenandoahHeap::set_cycle_mode(GCCycleMode gc_cycle_mode) {
3176   _gc_cycle_mode.set(gc_cycle_mode);
3177 }
3178 
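// Raw snapshot of the packed gc-state flag byte.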
3179 char ShenandoahHeap::gc_state() const {
3180   return _gc_state.raw_value();
3181 }
3182 
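// String deduplication hook: forwards to ShenandoahStringDedup when deduplication is
// enabled; the assert checks that the oop really is a java.lang.String.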
3183 void ShenandoahHeap::deduplicate_string(oop str) {
3184   assert(java_lang_String::is_instance(str), "invariant");
3185 
3186   if (ShenandoahStringDedup::is_enabled()) {
3187     ShenandoahStringDedup::deduplicate(str);
3188   }
3189 }