1 /*
   2  * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 #include "memory/allocation.hpp"
  26 
  27 #include "gc_implementation/shared/gcTimer.hpp"
  28 #include "gc_implementation/shenandoah/shenandoahGCTraceTime.hpp"
  29 #include "gc_implementation/shared/parallelCleaning.hpp"
  30 
  31 #include "gc_implementation/shenandoah/brooksPointer.hpp"
  32 #include "gc_implementation/shenandoah/shenandoahAllocTracker.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahConcurrentMark.hpp"
  37 #include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
  38 #include "gc_implementation/shenandoah/shenandoahControlThread.hpp"
  39 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
  40 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
  41 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc_implementation/shenandoah/shenandoahHeapRegion.hpp"
  43 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.hpp"
  44 #include "gc_implementation/shenandoah/shenandoahMarkCompact.hpp"
  45 #include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
  46 #include "gc_implementation/shenandoah/shenandoahMonitoringSupport.hpp"
  47 #include "gc_implementation/shenandoah/shenandoahMetrics.hpp"
  48 #include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
  49 #include "gc_implementation/shenandoah/shenandoahPacer.hpp"
  50 #include "gc_implementation/shenandoah/shenandoahPacer.inline.hpp"
  51 #include "gc_implementation/shenandoah/shenandoahSuspendibleThreadSet.hpp"
  52 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
  53 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
  54 #include "gc_implementation/shenandoah/shenandoahVerifier.hpp"
  55 #include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp"
  56 #include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
  57 #include "gc_implementation/shenandoah/shenandoahWorkerPolicy.hpp"
  58 #include "gc_implementation/shenandoah/vm_operations_shenandoah.hpp"
  59 #include "gc_implementation/shenandoah/heuristics/shenandoahAdaptiveHeuristics.hpp"
  60 #include "gc_implementation/shenandoah/heuristics/shenandoahAggressiveHeuristics.hpp"
  61 #include "gc_implementation/shenandoah/heuristics/shenandoahCompactHeuristics.hpp"
  62 #include "gc_implementation/shenandoah/heuristics/shenandoahPassiveHeuristics.hpp"
  63 #include "gc_implementation/shenandoah/heuristics/shenandoahStaticHeuristics.hpp"
  64 
  65 #include "memory/metaspace.hpp"
  66 #include "runtime/vmThread.hpp"
  67 #include "services/mallocTracker.hpp"
  68 
  69 ShenandoahUpdateRefsClosure::ShenandoahUpdateRefsClosure() : _heap(ShenandoahHeap::heap()) {}
  70 
  71 #ifdef ASSERT
  72 template <class T>
  73 void ShenandoahAssertToSpaceClosure::do_oop_nv(T* p) {
  74   T o = oopDesc::load_heap_oop(p);
  75   if (! oopDesc::is_null(o)) {
  76     oop obj = oopDesc::decode_heap_oop_not_null(o);
  77     shenandoah_assert_not_forwarded(p, obj);
  78   }
  79 }
  80 
  81 void ShenandoahAssertToSpaceClosure::do_oop(narrowOop* p) { do_oop_nv(p); }
  82 void ShenandoahAssertToSpaceClosure::do_oop(oop* p)       { do_oop_nv(p); }
  83 #endif
  84 
  85 class ShenandoahPretouchTask : public AbstractGangTask {
  86 private:
  87   ShenandoahRegionIterator _regions;
  88   const size_t _bitmap_size;
  89   const size_t _page_size;
  90   char* _bitmap_base;
  91 public:
  92   ShenandoahPretouchTask(char* bitmap_base, size_t bitmap_size, size_t page_size) :
  93     AbstractGangTask("Shenandoah PreTouch"),
  94     _bitmap_size(bitmap_size),
  95     _page_size(page_size),
  96     _bitmap_base(bitmap_base) {}
  97 
  98   virtual void work(uint worker_id) {
  99     ShenandoahHeapRegion* r = _regions.next();
 100     while (r != NULL) {
 101       os::pretouch_memory((char*) r->bottom(), (char*) r->end());
 102 
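           // Also pretouch the slice of the marking bitmap that covers this region.
           // MarkBitMap::heap_map_factor() is the number of heap bytes covered by one
           // bitmap byte, so the bitmap offsets scale down from the region byte offsets.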
 103       size_t start = r->region_number()       * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 104       size_t end   = (r->region_number() + 1) * ShenandoahHeapRegion::region_size_bytes() / MarkBitMap::heap_map_factor();
 105       assert (end <= _bitmap_size, err_msg("end is sane: " SIZE_FORMAT " <= " SIZE_FORMAT, end, _bitmap_size));
 106 
 107       os::pretouch_memory(_bitmap_base + start, _bitmap_base + end);
 108 
 109       r = _regions.next();
 110     }
 111   }
 112 };
 113 
 114 jint ShenandoahHeap::initialize() {
 115   CollectedHeap::pre_initialize();
 116 
 117   BrooksPointer::initial_checks();
 118 
 119   initialize_heuristics();
 120 
 121   size_t init_byte_size = collector_policy()->initial_heap_byte_size();
 122   size_t max_byte_size = collector_policy()->max_heap_byte_size();
 123   size_t heap_alignment = collector_policy()->heap_alignment();
 124 
 125   if (ShenandoahAlwaysPreTouch) {
 126     // Enabled pre-touch means the entire heap is committed right away.
 127     init_byte_size = max_byte_size;
 128   }
 129 
 130   Universe::check_alignment(max_byte_size,
 131                             ShenandoahHeapRegion::region_size_bytes(),
 132                             "shenandoah heap");
 133   Universe::check_alignment(init_byte_size,
 134                             ShenandoahHeapRegion::region_size_bytes(),
 135                             "shenandoah heap");
 136 
 137   ReservedSpace heap_rs = Universe::reserve_heap(max_byte_size,
 138                                                  heap_alignment);
 139 
 140   _reserved.set_word_size(0);
 141   _reserved.set_start((HeapWord*)heap_rs.base());
 142   _reserved.set_end((HeapWord*)(heap_rs.base() + heap_rs.size()));
 143 
 144   set_barrier_set(new ShenandoahBarrierSet(this));
 145   ReservedSpace pgc_rs = heap_rs.first_part(max_byte_size);
 146 
 147   _num_regions = ShenandoahHeapRegion::region_count();
 148   size_t num_committed_regions = init_byte_size / ShenandoahHeapRegion::region_size_bytes();
 149   _initial_size = num_committed_regions * ShenandoahHeapRegion::region_size_bytes();
 150   _committed = _initial_size;
 151 
 152   log_info(gc, heap)("Initialize Shenandoah heap with initial size " SIZE_FORMAT " bytes", init_byte_size);
 153   if (!os::commit_memory(pgc_rs.base(), _initial_size, false)) {
 154     vm_exit_out_of_memory(_initial_size, OOM_MMAP_ERROR, "Shenandoah failed to initialize heap");
 155   }
 156 
 157   size_t reg_size_words = ShenandoahHeapRegion::region_size_words();
 158   size_t reg_size_bytes = ShenandoahHeapRegion::region_size_bytes();
 159 
 160   _regions = NEW_C_HEAP_ARRAY(ShenandoahHeapRegion*, _num_regions, mtGC);
 161   _free_set = new ShenandoahFreeSet(this, _num_regions);
 162 
 163   _collection_set = new ShenandoahCollectionSet(this, (HeapWord*)pgc_rs.base());
 164 
 165   if (ShenandoahPacing) {
 166     _pacer = new ShenandoahPacer(this);
 167     _pacer->setup_for_idle();
 168   } else {
 169     _pacer = NULL;
 170   }
 171 
 172   assert((((size_t) base()) & ShenandoahHeapRegion::region_size_bytes_mask()) == 0,
 173          err_msg("misaligned heap: " PTR_FORMAT, p2i(base())));
 174 
 175   // The call below uses the SATB queue machinery that currently lives in G1 code,
 176   // but arguably belongs in a shared location.
 177   JavaThread::satb_mark_queue_set().initialize(SATB_Q_CBL_mon,
 178                                                SATB_Q_FL_lock,
 179                                                20 /*G1SATBProcessCompletedThreshold */,
 180                                                Shared_SATB_Q_lock);
 181 
 182   // Reserve space for the marking bitmap.
 183   size_t bitmap_page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 184   _bitmap_size = MarkBitMap::compute_size(heap_rs.size());
 185   _bitmap_size = align_size_up(_bitmap_size, bitmap_page_size);
 186   _heap_region = MemRegion((HeapWord*) heap_rs.base(), heap_rs.size() / HeapWordSize);
 187 
 188   size_t bitmap_bytes_per_region = reg_size_bytes / MarkBitMap::heap_map_factor();
 189 
 190   guarantee(bitmap_bytes_per_region != 0,
 191             err_msg("Bitmap bytes per region should not be zero"));
 192   guarantee(is_power_of_2(bitmap_bytes_per_region),
 193             err_msg("Bitmap bytes per region should be power of two: " SIZE_FORMAT, bitmap_bytes_per_region));
 194 
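       // Bitmap commit/uncommit happens at page granularity. If one page covers the bitmap
       // for more than one region, those regions share a single page-sized slice; otherwise
       // each region gets its own slice, which is itself a whole number of pages.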
 195   if (bitmap_page_size > bitmap_bytes_per_region) {
 196     _bitmap_regions_per_slice = bitmap_page_size / bitmap_bytes_per_region;
 197     _bitmap_bytes_per_slice = bitmap_page_size;
 198   } else {
 199     _bitmap_regions_per_slice = 1;
 200     _bitmap_bytes_per_slice = bitmap_bytes_per_region;
 201   }
 202 
 203   guarantee(_bitmap_regions_per_slice >= 1,
 204             err_msg("Should have at least one region per slice: " SIZE_FORMAT,
 205                     _bitmap_regions_per_slice));
 206 
 207   guarantee(((_bitmap_bytes_per_slice) % bitmap_page_size) == 0,
 208             err_msg("Bitmap slices should be page-granular: bps = " SIZE_FORMAT ", page size = " SIZE_FORMAT,
 209                     _bitmap_bytes_per_slice, bitmap_page_size));
 210 
 211   ReservedSpace bitmap0(_bitmap_size, bitmap_page_size);
 212   MemTracker::record_virtual_memory_type(bitmap0.base(), mtGC);
 213   _bitmap_region = MemRegion((HeapWord*) bitmap0.base(), bitmap0.size() / HeapWordSize);
 214 
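       // Commit only the bitmap slices that cover the initially committed regions,
       // rounding the region count up to whole slices.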
 215   size_t bitmap_init_commit = _bitmap_bytes_per_slice *
 216                               align_size_up(num_committed_regions, _bitmap_regions_per_slice) / _bitmap_regions_per_slice;
 217   bitmap_init_commit = MIN2(_bitmap_size, bitmap_init_commit);
 218   os::commit_memory_or_exit((char *) (_bitmap_region.start()), bitmap_init_commit, false,
 219                             "couldn't allocate initial bitmap");
 220 
 221   size_t page_size = UseLargePages ? (size_t)os::large_page_size() : (size_t)os::vm_page_size();
 222 
 223   if (ShenandoahVerify) {
 224     ReservedSpace verify_bitmap(_bitmap_size, page_size);
 225     os::commit_memory_or_exit(verify_bitmap.base(), verify_bitmap.size(), false,
 226                               "couldn't allocate verification bitmap");
 227     MemTracker::record_virtual_memory_type(verify_bitmap.base(), mtGC);
 228     MemRegion verify_bitmap_region = MemRegion((HeapWord *) verify_bitmap.base(), verify_bitmap.size() / HeapWordSize);
 229     _verification_bit_map.initialize(_heap_region, verify_bitmap_region);
 230     _verifier = new ShenandoahVerifier(this, &_verification_bit_map);
 231   }
 232 
 233   _marking_context = new ShenandoahMarkingContext(_heap_region, _bitmap_region, _num_regions);
 234 
 235   {
 236     ShenandoahHeapLocker locker(lock());
 237     for (size_t i = 0; i < _num_regions; i++) {
 238       ShenandoahHeapRegion* r = new ShenandoahHeapRegion(this,
 239                                                          (HeapWord*) pgc_rs.base() + reg_size_words * i,
 240                                                          reg_size_words,
 241                                                          i,
 242                                                          i < num_committed_regions);
 243 
 244       _marking_context->initialize_top_at_mark_start(r);
 245       _regions[i] = r;
 246       assert(!collection_set()->is_in(i), "New region should not be in collection set");
 247     }
 248 
 249     // Initialize to complete
 250     _marking_context->mark_complete();
 251 
 252     _free_set->rebuild();
 253   }
 254 
 255   if (ShenandoahAlwaysPreTouch) {
 256     assert (!AlwaysPreTouch, "Should have been overridden");
 257 
 258     // For NUMA, it is important to pre-touch the storage under the bitmaps with worker threads,
 259     // before initialize() below zeroes it with the initializing thread. For any given region,
 260     // we touch the region and the corresponding bitmaps from the same thread.
 261     ShenandoahPushWorkerScope scope(workers(), _max_workers, false);
 262 
 263     log_info(gc, heap)("Parallel pretouch " SIZE_FORMAT " regions with " SIZE_FORMAT " byte pages",
 264                        _num_regions, page_size);
 265     ShenandoahPretouchTask cl(bitmap0.base(), _bitmap_size, page_size);
 266     _workers->run_task(&cl);
 267   }
 268 
 269 
 270   // Reserve aux bitmap for use in object_iterate(). We don't commit it here.
 271   ReservedSpace aux_bitmap(_bitmap_size, bitmap_page_size);
 272   MemTracker::record_virtual_memory_type(aux_bitmap.base(), mtGC);
 273   _aux_bitmap_region = MemRegion((HeapWord*) aux_bitmap.base(), aux_bitmap.size() / HeapWordSize);
 274   _aux_bit_map.initialize(_heap_region, _aux_bitmap_region);
 275 
 276   _monitoring_support = new ShenandoahMonitoringSupport(this);
 277 
 278   _phase_timings = new ShenandoahPhaseTimings();
 279 
 280   if (ShenandoahAllocationTrace) {
 281     _alloc_tracker = new ShenandoahAllocTracker();
 282   }
 283 
 284   ShenandoahStringDedup::initialize();
 285 
 286   _control_thread = new ShenandoahControlThread();
 287 
 288   ShenandoahCodeRoots::initialize();
 289 
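       // Per-worker liveness caches: one jushort counter per region, accumulated during
       // marking and later flushed into the regions' live data, so workers do not contend
       // on atomics for every marked object.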
 290   _liveness_cache = NEW_C_HEAP_ARRAY(jushort*, _max_workers, mtGC);
 291   for (uint worker = 0; worker < _max_workers; worker++) {
 292     _liveness_cache[worker] = NEW_C_HEAP_ARRAY(jushort, _num_regions, mtGC);
 293     Copy::fill_to_bytes(_liveness_cache[worker], _num_regions * sizeof(jushort));
 294   }
 295 
 296   return JNI_OK;
 297 }
 298 
 299 #ifdef _MSC_VER
 300 #pragma warning( push )
 301 #pragma warning( disable:4355 ) // 'this' : used in base member initializer list
 302 #endif
 303 
 304 void ShenandoahHeap::initialize_heuristics() {
 305   if (ShenandoahGCHeuristics != NULL) {
 306     if (strcmp(ShenandoahGCHeuristics, "aggressive") == 0) {
 307       _heuristics = new ShenandoahAggressiveHeuristics();
 308     } else if (strcmp(ShenandoahGCHeuristics, "static") == 0) {
 309       _heuristics = new ShenandoahStaticHeuristics();
 310     } else if (strcmp(ShenandoahGCHeuristics, "adaptive") == 0) {
 311       _heuristics = new ShenandoahAdaptiveHeuristics();
 312     } else if (strcmp(ShenandoahGCHeuristics, "passive") == 0) {
 313       _heuristics = new ShenandoahPassiveHeuristics();
 314     } else if (strcmp(ShenandoahGCHeuristics, "compact") == 0) {
 315       _heuristics = new ShenandoahCompactHeuristics();
 316     } else {
 317       vm_exit_during_initialization("Unknown -XX:ShenandoahGCHeuristics option");
 318     }
 319 
 320     if (_heuristics->is_diagnostic() && !UnlockDiagnosticVMOptions) {
 321       vm_exit_during_initialization(
 322               err_msg("Heuristics \"%s\" is diagnostic, and must be enabled via -XX:+UnlockDiagnosticVMOptions.",
 323                       _heuristics->name()));
 324     }
 325     if (_heuristics->is_experimental() && !UnlockExperimentalVMOptions) {
 326       vm_exit_during_initialization(
 327               err_msg("Heuristics \"%s\" is experimental, and must be enabled via -XX:+UnlockExperimentalVMOptions.",
 328                       _heuristics->name()));
 329     }
 330     log_info(gc, init)("Shenandoah heuristics: %s",
 331                        _heuristics->name());
 332   } else {
 333     ShouldNotReachHere();
 334   }
 335 }
 336 
 337 ShenandoahHeap::ShenandoahHeap(ShenandoahCollectorPolicy* policy) :
 338   SharedHeap(policy),
 339   _shenandoah_policy(policy),
 340   _regions(NULL),
 341   _free_set(NULL),
 342   _collection_set(NULL),
 343   _update_refs_iterator(this),
 344   _bytes_allocated_since_gc_start(0),
 345   _max_workers((uint)MAX2(ConcGCThreads, ParallelGCThreads)),
 346   _ref_processor(NULL),
 347   _marking_context(NULL),
 348   _aux_bit_map(),
 349   _verifier(NULL),
 350   _pacer(NULL),
 351   _gc_timer(new (ResourceObj::C_HEAP, mtGC) ConcurrentGCTimer()),
 352   _phase_timings(NULL),
 353   _alloc_tracker(NULL)
 354 {
 355   log_info(gc, init)("GC threads: " UINTX_FORMAT " parallel, " UINTX_FORMAT " concurrent", ParallelGCThreads, ConcGCThreads);
 356   log_info(gc, init)("Reference processing: %s", ParallelRefProcEnabled ? "parallel" : "serial");
 357 
 358   _scm = new ShenandoahConcurrentMark();
 359   _full_gc = new ShenandoahMarkCompact();
 360   _used = 0;
 361 
 362   _max_workers = MAX2(_max_workers, 1U);
 363   _workers = new ShenandoahWorkGang("Shenandoah GC Threads", _max_workers,
 364                             /* are_GC_task_threads */true,
 365                             /* are_ConcurrentGC_threads */false);
 366   if (_workers == NULL) {
 367     vm_exit_during_initialization("Failed necessary allocation.");
 368   } else {
 369     _workers->initialize_workers();
 370   }
 371 }
 372 
 373 #ifdef _MSC_VER
 374 #pragma warning( pop )
 375 #endif
 376 
 377 class ShenandoahResetBitmapTask : public AbstractGangTask {
 378 private:
 379   ShenandoahRegionIterator _regions;
 380 
 381 public:
 382   ShenandoahResetBitmapTask() :
 383     AbstractGangTask("Parallel Reset Bitmap Task") {}
 384 
 385   void work(uint worker_id) {
 386     ShenandoahHeapRegion* region = _regions.next();
 387     ShenandoahHeap* heap = ShenandoahHeap::heap();
 388     ShenandoahMarkingContext* const ctx = heap->marking_context();
 389     while (region != NULL) {
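           // Only committed bitmap slices need clearing; uncommitted slices come back
           // zero-filled when they are committed again.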
 390       if (heap->is_bitmap_slice_committed(region)) {
 391         ctx->clear_bitmap(region);
 392       }
 393       region = _regions.next();
 394     }
 395   }
 396 };
 397 
 398 void ShenandoahHeap::reset_mark_bitmap() {
 399   assert_gc_workers(_workers->active_workers());
 400   mark_incomplete_marking_context();
 401 
 402   ShenandoahResetBitmapTask task;
 403   _workers->run_task(&task);
 404 }
 405 
 406 void ShenandoahHeap::print_on(outputStream* st) const {
 407   st->print_cr("Shenandoah Heap");
 408   st->print_cr(" " SIZE_FORMAT "K total, " SIZE_FORMAT "K committed, " SIZE_FORMAT "K used",
 409                capacity() / K, committed() / K, used() / K);
 410   st->print_cr(" " SIZE_FORMAT " x " SIZE_FORMAT"K regions",
 411                num_regions(), ShenandoahHeapRegion::region_size_bytes() / K);
 412 
 413   st->print("Status: ");
 414   if (has_forwarded_objects())               st->print("has forwarded objects, ");
 415   if (is_concurrent_mark_in_progress())      st->print("marking, ");
 416   if (is_evacuation_in_progress())           st->print("evacuating, ");
 417   if (is_update_refs_in_progress())          st->print("updating refs, ");
 418   if (is_degenerated_gc_in_progress())       st->print("degenerated gc, ");
 419   if (is_full_gc_in_progress())              st->print("full gc, ");
 420   if (is_full_gc_move_in_progress())         st->print("full gc move, ");
 421 
 422   if (cancelled_gc()) {
 423     st->print("cancelled");
 424   } else {
 425     st->print("not cancelled");
 426   }
 427   st->cr();
 428 
 429   st->print_cr("Reserved region:");
 430   st->print_cr(" - [" PTR_FORMAT ", " PTR_FORMAT ") ",
 431                p2i(reserved_region().start()),
 432                p2i(reserved_region().end()));
 433 
 434   st->cr();
 435   MetaspaceAux::print_on(st);
 436 
 437   if (Verbose) {
 438     print_heap_regions_on(st);
 439   }
 440 }
 441 
 442 class ShenandoahInitGCLABClosure : public ThreadClosure {
 443 public:
 444   void do_thread(Thread* thread) {
 445     if (thread != NULL && (thread->is_Java_thread() || thread->is_Worker_thread())) {
 446       thread->gclab().initialize(true);
 447     }
 448   }
 449 };
 450 
 451 void ShenandoahHeap::post_initialize() {
 452   if (UseTLAB) {
 453     MutexLocker ml(Threads_lock);
 454 
 455     ShenandoahInitGCLABClosure init_gclabs;
 456     Threads::java_threads_do(&init_gclabs);
 457     _workers->threads_do(&init_gclabs);
 458   }
 459 
 460   _scm->initialize(_max_workers);
 461   _full_gc->initialize(_gc_timer);
 462 
 463   ref_processing_init();
 464 
 465   _heuristics->initialize();
 466 }
 467 
 468 size_t ShenandoahHeap::used() const {
 469   OrderAccess::acquire();
 470   return (size_t) _used;
 471 }
 472 
 473 size_t ShenandoahHeap::committed() const {
 474   OrderAccess::acquire();
 475   return _committed;
 476 }
 477 
 478 void ShenandoahHeap::increase_committed(size_t bytes) {
 479   assert_heaplock_or_safepoint();
 480   _committed += bytes;
 481 }
 482 
 483 void ShenandoahHeap::decrease_committed(size_t bytes) {
 484   assert_heaplock_or_safepoint();
 485   _committed -= bytes;
 486 }
 487 
 488 void ShenandoahHeap::increase_used(size_t bytes) {
 489   Atomic::add(bytes, &_used);
 490 }
 491 
 492 void ShenandoahHeap::set_used(size_t bytes) {
 493   OrderAccess::release_store_fence(&_used, bytes);
 494 }
 495 
 496 void ShenandoahHeap::decrease_used(size_t bytes) {
 497   assert(used() >= bytes, "cannot decrease used() below zero");
 498   Atomic::add(-(jlong)bytes, &_used);
 499 }
 500 
 501 void ShenandoahHeap::increase_allocated(size_t bytes) {
 502   Atomic::add(bytes, &_bytes_allocated_since_gc_start);
 503 }
 504 
 505 void ShenandoahHeap::notify_mutator_alloc_words(size_t words, bool waste) {
 506   size_t bytes = words * HeapWordSize;
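       // Wasted allocations (e.g. retired TLAB remainders) count toward the allocation
       // rate, but not toward used(); their pacer budget is claimed unconditionally below.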
 507   if (!waste) {
 508     increase_used(bytes);
 509   }
 510   increase_allocated(bytes);
 511   if (ShenandoahPacing) {
 512     control_thread()->pacing_notify_alloc(words);
 513     if (waste) {
 514       pacer()->claim_for_alloc(words, true);
 515     }
 516   }
 517 }
 518 
 519 size_t ShenandoahHeap::capacity() const {
 520   return num_regions() * ShenandoahHeapRegion::region_size_bytes();
 521 }
 522 
 523 size_t ShenandoahHeap::max_capacity() const {
 524   return _num_regions * ShenandoahHeapRegion::region_size_bytes();
 525 }
 526 
 527 size_t ShenandoahHeap::initial_capacity() const {
 528   return _initial_size;
 529 }
 530 
 531 bool ShenandoahHeap::is_in(const void* p) const {
 532   HeapWord* heap_base = (HeapWord*) base();
 533   HeapWord* last_region_end = heap_base + ShenandoahHeapRegion::region_size_words() * num_regions();
 534   return p >= heap_base && p < last_region_end;
 535 }
 536 
 537 void ShenandoahHeap::op_uncommit(double shrink_before) {
 538   assert (ShenandoahUncommit, "should be enabled");
 539 
 540   size_t count = 0;
 541   for (size_t i = 0; i < num_regions(); i++) {
 542     ShenandoahHeapRegion* r = get_region(i);
 543     if (r->is_empty_committed() && (r->empty_time() < shrink_before)) {
 544       ShenandoahHeapLocker locker(lock());
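           // Re-check under the heap lock: an allocator may have taken this region between
           // the unlocked check above and acquiring the lock.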
 545       if (r->is_empty_committed()) {
 546         r->make_uncommitted();
 547         count++;
 548       }
 549     }
 550     SpinPause(); // allow allocators to take the lock
 551   }
 552 
 553   if (count > 0) {
 554     log_info(gc)("Uncommitted " SIZE_FORMAT "M. Heap: " SIZE_FORMAT "M reserved, " SIZE_FORMAT "M committed, " SIZE_FORMAT "M used",
 555                  count * ShenandoahHeapRegion::region_size_bytes() / M, capacity() / M, committed() / M, used() / M);
 556     _control_thread->notify_heap_changed();
 557   }
 558 }
 559 
 560 HeapWord* ShenandoahHeap::allocate_from_gclab_slow(Thread* thread, size_t size) {
 561   // Retain the GCLAB and allocate the object in shared space if
 562   // the amount free in the GCLAB is too large to discard.
 563   if (thread->gclab().free() > thread->gclab().refill_waste_limit()) {
 564     thread->gclab().record_slow_allocation(size);
 565     return NULL;
 566   }
 567 
 568   // Discard gclab and allocate a new one.
 569   // To minimize fragmentation, the last GCLAB may be smaller than the rest.
 570   size_t new_gclab_size = thread->gclab().compute_size(size);
 571 
 572   thread->gclab().clear_before_allocation();
 573 
 574   if (new_gclab_size == 0) {
 575     return NULL;
 576   }
 577 
 578   // Allocated object should fit in new GCLAB, and new_gclab_size should be larger than min
 579   size_t min_size = MAX2(size + ThreadLocalAllocBuffer::alignment_reserve(), ThreadLocalAllocBuffer::min_size());
 580   new_gclab_size = MAX2(new_gclab_size, min_size);
 581 
 582   // Allocate a new GCLAB...
 583   size_t actual_size = 0;
 584   HeapWord* obj = allocate_new_gclab(min_size, new_gclab_size, &actual_size);
 585 
 586   if (obj == NULL) {
 587     return NULL;
 588   }
 589 
 590   assert (size <= actual_size, "allocation should fit");
 591 
 592   if (ZeroTLAB) {
 593     // ..and clear it.
 594     Copy::zero_to_words(obj, actual_size);
 595   } else {
 596     // ...and zap just allocated object.
 597 #ifdef ASSERT
 598     // Skip mangling the space corresponding to the object header to
 599     // ensure that the returned space is not considered parsable by
 600     // any concurrent GC thread.
 601     size_t hdr_size = oopDesc::header_size();
 602     Copy::fill_to_words(obj + hdr_size, actual_size - hdr_size, badHeapWordVal);
 603 #endif // ASSERT
 604   }
 605   thread->gclab().fill(obj, obj + size, actual_size);
 606   return obj;
 607 }
 608 
 609 HeapWord* ShenandoahHeap::allocate_new_tlab(size_t word_size) {
 610   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_tlab(word_size);
 611   return allocate_memory(req);
 612 }
 613 
 614 HeapWord* ShenandoahHeap::allocate_new_gclab(size_t min_size,
 615                                              size_t word_size,
 616                                              size_t* actual_size) {
 617   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_gclab(min_size, word_size);
 618   HeapWord* res = allocate_memory(req);
 619   if (res != NULL) {
 620     *actual_size = req.actual_size();
 621   } else {
 622     *actual_size = 0;
 623   }
 624   return res;
 625 }
 626 
 627 ShenandoahHeap* ShenandoahHeap::heap() {
 628   CollectedHeap* heap = Universe::heap();
 629   assert(heap != NULL, "Uninitialized access to ShenandoahHeap::heap()");
 630   assert(heap->kind() == CollectedHeap::ShenandoahHeap, "not a shenandoah heap");
 631   return (ShenandoahHeap*) heap;
 632 }
 633 
 634 ShenandoahHeap* ShenandoahHeap::heap_no_check() {
 635   CollectedHeap* heap = Universe::heap();
 636   return (ShenandoahHeap*) heap;
 637 }
 638 
 639 HeapWord* ShenandoahHeap::allocate_memory(ShenandoahAllocRequest& req) {
 640   ShenandoahAllocTrace trace_alloc(req.size(), req.type());
 641 
 642   intptr_t pacer_epoch = 0;
 643   bool in_new_region = false;
 644   HeapWord* result = NULL;
 645 
 646   if (req.is_mutator_alloc()) {
 647     if (ShenandoahPacing) {
 648       pacer()->pace_for_alloc(req.size());
 649       pacer_epoch = pacer()->epoch();
 650     }
 651 
 652     if (!ShenandoahAllocFailureALot || !should_inject_alloc_failure()) {
 653       result = allocate_memory_under_lock(req, in_new_region);
 654     }
 655 
 656     // Allocation failed: block until the control thread has reacted, then retry.
 657     //
 658     // It might happen that a thread requesting an allocation unblocks long after
 659     // the GC that was meant to help it, only to fail again because other threads
 660     // have already depleted the freed storage. In that case, the better strategy
 661     // is to keep retrying, as long as GC makes progress.
 662     //
 663     // We also need to make sure the allocation is retried after at least one
 664     // Full GC, which means we retry more than ShenandoahFullGCThreshold times.
 665 
 666     size_t tries = 0;
 667 
 668     while (result == NULL && _progress_last_gc.is_set()) {
 669       tries++;
 670       control_thread()->handle_alloc_failure(req.size());
 671       result = allocate_memory_under_lock(req, in_new_region);
 672     }
 673 
 674     while (result == NULL && tries <= ShenandoahFullGCThreshold) {
 675       tries++;
 676       control_thread()->handle_alloc_failure(req.size());
 677       result = allocate_memory_under_lock(req, in_new_region);
 678     }
 679 
 680   } else {
 681     assert(req.is_gc_alloc(), "Can only accept GC allocs here");
 682     result = allocate_memory_under_lock(req, in_new_region);
 683     // Do not call handle_alloc_failure() here, because we cannot block.
 684     // The allocation failure would be handled by the WB slowpath with handle_alloc_failure_evac().
 685   }
 686 
 687   if (in_new_region) {
 688     control_thread()->notify_heap_changed();
 689   }
 690 
 691   if (result != NULL) {
 692     size_t requested = req.size();
 693     size_t actual = req.actual_size();
 694 
 695     assert (req.is_lab_alloc() || (requested == actual),
 696             err_msg("Only LAB allocations are elastic: %s, requested = " SIZE_FORMAT ", actual = " SIZE_FORMAT,
 697                     ShenandoahAllocRequest::alloc_type_to_string(req.type()), requested, actual));
 698 
 699     if (req.is_mutator_alloc()) {
 700       notify_mutator_alloc_words(actual, false);
 701 
 702       // If we requested more than we were granted, give the rest back to pacer.
 703       // This only matters if we are in the same pacing epoch: do not try to unpace
 704       // over the budget for the other phase.
 705       if (ShenandoahPacing && (pacer_epoch > 0) && (requested > actual)) {
 706         pacer()->unpace_for_alloc(pacer_epoch, requested - actual);
 707       }
 708     } else {
 709       increase_used(actual*HeapWordSize);
 710     }
 711   }
 712 
 713   return result;
 714 }
 715 
 716 HeapWord* ShenandoahHeap::allocate_memory_under_lock(ShenandoahAllocRequest& req, bool& in_new_region) {
 717   ShenandoahHeapLocker locker(lock());
 718   return _free_set->allocate(req, in_new_region);
 719 }
 720 
 721 HeapWord*  ShenandoahHeap::mem_allocate(size_t size,
 722                                         bool*  gc_overhead_limit_was_exceeded) {
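       // Shared (out-of-LAB) allocations reserve one extra word in front of the object for
       // the Brooks forwarding pointer: "filler" points at that word, "result" at the object.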
 723   ShenandoahAllocRequest req = ShenandoahAllocRequest::for_shared(size + BrooksPointer::word_size());
 724   HeapWord* filler = allocate_memory(req);
 725   HeapWord* result = filler + BrooksPointer::word_size();
 726   if (filler != NULL) {
 727     BrooksPointer::initialize(oop(result));
 728 
 729     assert(! in_collection_set(result), "never allocate in targeted region");
 730     return result;
 731   } else {
 732     return NULL;
 733   }
 734 }
 735 
 736 class ShenandoahEvacuateUpdateRootsClosure: public ExtendedOopClosure {
 737 private:
 738   ShenandoahHeap* _heap;
 739   Thread* _thread;
 740 public:
 741   ShenandoahEvacuateUpdateRootsClosure() :
 742           _heap(ShenandoahHeap::heap()), _thread(Thread::current()) {
 743   }
 744 
 745 private:
 746   template <class T>
 747   void do_oop_work(T* p) {
 748     assert(_heap->is_evacuation_in_progress(), "Only do this when evacuation is in progress");
 749 
 750     T o = oopDesc::load_heap_oop(p);
 751     if (! oopDesc::is_null(o)) {
 752       oop obj = oopDesc::decode_heap_oop_not_null(o);
 753       if (_heap->in_collection_set(obj)) {
 754         shenandoah_assert_marked(p, obj);
 755         oop resolved = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
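             // Not forwarded yet: evacuate the object ourselves. evacuate_object() resolves
             // races internally, so if another thread installs a copy first, the winning
             // forwardee is returned and stored instead.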
 756         if (oopDesc::unsafe_equals(resolved, obj)) {
 757           bool evac;
 758           resolved = _heap->evacuate_object(obj, _thread, evac);
 759         }
 760         oopDesc::encode_store_heap_oop(p, resolved);
 761       }
 762     }
 763   }
 764 
 765 public:
 766   void do_oop(oop* p) {
 767     do_oop_work(p);
 768   }
 769   void do_oop(narrowOop* p) {
 770     do_oop_work(p);
 771   }
 772 };
 773 
 774 class ShenandoahConcurrentEvacuateRegionObjectClosure : public ObjectClosure {
 775 private:
 776   ShenandoahHeap* const _heap;
 777   Thread* const _thread;
 778 public:
 779   ShenandoahConcurrentEvacuateRegionObjectClosure(ShenandoahHeap* heap) :
 780     _heap(heap), _thread(Thread::current()) {}
 781 
 782   void do_object(oop p) {
 783     shenandoah_assert_marked(NULL, p);
 784     if (oopDesc::unsafe_equals(p, ShenandoahBarrierSet::resolve_forwarded_not_null(p))) {
 785       bool evac;
 786       _heap->evacuate_object(p, _thread, evac);
 787     }
 788   }
 789 };
 790 
 791 class ShenandoahConcurrentEvacuationTask : public AbstractGangTask {
 792 private:
 793   ShenandoahHeap* const _sh;
 794   ShenandoahCollectionSet* const _cs;
 795   bool _concurrent;
 796 public:
 797   ShenandoahConcurrentEvacuationTask(ShenandoahHeap* sh,
 798                                      ShenandoahCollectionSet* cs,
 799                                      bool concurrent) :
 800     AbstractGangTask("Parallel Evacuation Task"),
 801     _sh(sh),
 802     _cs(cs),
 803     _concurrent(concurrent) {}
 804 
 805   void work(uint worker_id) {
 806     ShenandoahWorkerSession worker_session(worker_id);
 807     ShenandoahEvacOOMScope oom_evac_scope;
 808     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers && _concurrent);
 809 
 810     ShenandoahConcurrentEvacuateRegionObjectClosure cl(_sh);
 811     ShenandoahHeapRegion* r;
 812     while ((r =_cs->claim_next()) != NULL) {
 813       assert(r->has_live(), "all-garbage regions are reclaimed early");
 814       _sh->marked_object_iterate(r, &cl);
 815 
 816       if (ShenandoahPacing) {
 817         _sh->pacer()->report_evac(r->used() >> LogHeapWordSize);
 818       }
 819 
 820       if (_sh->check_cancelled_gc_and_yield(_concurrent)) {
 821         break;
 822       }
 823     }
 824   }
 825 };
 826 
 827 void ShenandoahHeap::trash_cset_regions() {
 828   ShenandoahHeapLocker locker(lock());
 829 
 830   ShenandoahCollectionSet* set = collection_set();
 831   ShenandoahHeapRegion* r;
 832   set->clear_current_index();
 833   while ((r = set->next()) != NULL) {
 834     r->make_trash();
 835   }
 836   collection_set()->clear();
 837 }
 838 
 839 void ShenandoahHeap::print_heap_regions_on(outputStream* st) const {
 840   st->print_cr("Heap Regions:");
 841   st->print_cr("EU=empty-uncommitted, EC=empty-committed, R=regular, H=humongous start, HC=humongous continuation, CS=collection set, T=trash, P=pinned");
 842   st->print_cr("BTE=bottom/top/end, U=used, T=TLAB allocs, G=GCLAB allocs, S=shared allocs, L=live data");
 843   st->print_cr("R=root, CP=critical pins, TAMS=top-at-mark-start (previous, next)");
 844 
 845   for (size_t i = 0; i < num_regions(); i++) {
 846     get_region(i)->print_on(st);
 847   }
 848 }
 849 
 850 void ShenandoahHeap::trash_humongous_region_at(ShenandoahHeapRegion* start) {
 851   assert(start->is_humongous_start(), "reclaim regions starting with the first one");
 852 
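       // The humongous object starts one Brooks-pointer word past the region bottom; include
       // that word when computing how many regions the allocation spans.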
 853   oop humongous_obj = oop(start->bottom() + BrooksPointer::word_size());
 854   size_t size = humongous_obj->size() + BrooksPointer::word_size();
 855   size_t required_regions = ShenandoahHeapRegion::required_regions(size * HeapWordSize);
 856   size_t index = start->region_number() + required_regions - 1;
 857 
 858   assert(!start->has_live(), "liveness must be zero");
 859 
 860   for (size_t i = 0; i < required_regions; i++) {
 861     // Reclaim from tail. Otherwise, assertion fails when printing region to trace log,
 862     // as it expects that every region belongs to a humongous region starting with a humongous start region.
 863     ShenandoahHeapRegion* region = get_region(index--);
 864 
 865     assert(region->is_humongous(), "expect correct humongous start or continuation");
 866     assert(!region->is_cset(), "Humongous region should not be in collection set");
 867 
 868     region->make_trash_immediate();
 869   }
 870 }
 871 
 872 class ShenandoahRetireGCLABClosure : public ThreadClosure {
 873 private:
 874   bool _retire;
 875 public:
 876   ShenandoahRetireGCLABClosure(bool retire) : _retire(retire) {}
 877 
 878   void do_thread(Thread* thread) {
 879     assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));
 880     thread->gclab().make_parsable(_retire);
 881   }
 882 };
 883 
 884 void ShenandoahHeap::make_parsable(bool retire_tlabs) {
 885   if (UseTLAB) {
 886     CollectedHeap::ensure_parsability(retire_tlabs);
 887     ShenandoahRetireGCLABClosure cl(retire_tlabs);
 888     Threads::java_threads_do(&cl);
 889     _workers->threads_do(&cl);
 890   }
 891 }
 892 
 893 class ShenandoahEvacuateUpdateRootsTask : public AbstractGangTask {
 894   ShenandoahRootEvacuator* _rp;
 895 public:
 896 
 897   ShenandoahEvacuateUpdateRootsTask(ShenandoahRootEvacuator* rp) :
 898     AbstractGangTask("Shenandoah evacuate and update roots"),
 899     _rp(rp)
 900   {
 901     // Nothing else to do.
 902   }
 903 
 904   void work(uint worker_id) {
 905     ShenandoahWorkerSession worker_session(worker_id);
 906     ShenandoahEvacOOMScope oom_evac_scope;
 907     ShenandoahEvacuateUpdateRootsClosure cl;
 908 
 909     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 910     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
 911   }
 912 };
 913 
 914 class ShenandoahFixRootsTask : public AbstractGangTask {
 915   ShenandoahRootEvacuator* _rp;
 916 public:
 917 
 918   ShenandoahFixRootsTask(ShenandoahRootEvacuator* rp) :
 919     AbstractGangTask("Shenandoah update roots"),
 920     _rp(rp)
 921   {
 922     // Nothing else to do.
 923   }
 924 
 925   void work(uint worker_id) {
 926     ShenandoahWorkerSession worker_session(worker_id);
 927     ShenandoahEvacOOMScope oom_evac_scope;
 928     ShenandoahUpdateRefsClosure cl;
 929     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 930 
 931     _rp->process_evacuate_roots(&cl, &blobsCl, worker_id);
 932   }
 933 };
 934 
 935 void ShenandoahHeap::evacuate_and_update_roots() {
 936   COMPILER2_PRESENT(DerivedPointerTable::clear());
 937 
 938   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
 939 
 940   {
 941     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
 942     ShenandoahEvacuateUpdateRootsTask roots_task(&rp);
 943     workers()->run_task(&roots_task);
 944   }
 945 
 946   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 947 
 948   if (cancelled_gc()) {
 949     // If initial evacuation has been cancelled, we need to update all references
 950     // after all workers have finished. Otherwise we might run into the following problem:
 951     // GC thread 1 cannot allocate anymore, thus evacuation fails, leaves from-space ptr of object X.
 952     // GC thread 2 evacuates the same object X to to-space
 953     // which leaves a truly dangling from-space reference in the first root oop*. This must not happen.
 954     // clear() and update_pointers() must always be called in pairs,
 955     // cannot nest with above clear()/update_pointers().
 956     COMPILER2_PRESENT(DerivedPointerTable::clear());
 957     ShenandoahRootEvacuator rp(this, workers()->active_workers(), ShenandoahPhaseTimings::init_evac);
 958     ShenandoahFixRootsTask update_roots_task(&rp);
 959     workers()->run_task(&update_roots_task);
 960     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 961   }
 962 }
 963 
 964 
 965 void ShenandoahHeap::roots_iterate(OopClosure* cl) {
 966   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only iterate roots while world is stopped");
 967 
 968   CodeBlobToOopClosure blobsCl(cl, false);
 969   CLDToOopClosure cldCl(cl);
 970 
 971   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
 972   rp.process_all_roots(cl, NULL, &cldCl, &blobsCl, NULL, 0);
 973 }
 974 
 975 size_t  ShenandoahHeap::unsafe_max_tlab_alloc(Thread *thread) const {
 976   // Returns size in bytes
 977   return MIN2(_free_set->unsafe_peek_free(), ShenandoahHeapRegion::max_tlab_size_bytes());
 978 }
 979 
 980 size_t ShenandoahHeap::max_tlab_size() const {
 981   // Returns size in words
 982   return ShenandoahHeapRegion::max_tlab_size_words();
 983 }
 984 
 985 class ShenandoahResizeGCLABClosure : public ThreadClosure {
 986 public:
 987   void do_thread(Thread* thread) {
 988     assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));
 989     thread->gclab().resize();
 990   }
 991 };
 992 
 993 void ShenandoahHeap::resize_all_tlabs() {
 994   CollectedHeap::resize_all_tlabs();
 995 
 996   ShenandoahResizeGCLABClosure cl;
 997   Threads::java_threads_do(&cl);
 998   _workers->threads_do(&cl);
 999 }
1000 
1001 class ShenandoahAccumulateStatisticsGCLABClosure : public ThreadClosure {
1002 public:
1003   void do_thread(Thread* thread) {
1004     assert(thread->gclab().is_initialized(), err_msg("GCLAB should be initialized for %s", thread->name()));
1005     thread->gclab().accumulate_statistics();
1006     thread->gclab().initialize_statistics();
1007   }
1008 };
1009 
1010 void ShenandoahHeap::accumulate_statistics_all_gclabs() {
1011   ShenandoahAccumulateStatisticsGCLABClosure cl;
1012   Threads::java_threads_do(&cl);
1013   _workers->threads_do(&cl);
1014 }
1015 
1016 void ShenandoahHeap::collect(GCCause::Cause cause) {
1017   _control_thread->handle_explicit_gc(cause);
1018 }
1019 
1020 void ShenandoahHeap::do_full_collection(bool clear_all_soft_refs) {
1021   //assert(false, "Shouldn't need to do full collections");
1022 }
1023 
1024 CollectorPolicy* ShenandoahHeap::collector_policy() const {
1025   return _shenandoah_policy;
1026 }
1027 
1028 void ShenandoahHeap::resize_tlabs() {
1029   CollectedHeap::resize_all_tlabs();
1030 }
1031 
1032 void ShenandoahHeap::accumulate_statistics_tlabs() {
1033   CollectedHeap::accumulate_statistics_all_tlabs();
1034 }
1035 
1036 HeapWord* ShenandoahHeap::block_start(const void* addr) const {
1037   Space* sp = heap_region_containing(addr);
1038   if (sp != NULL) {
1039     return sp->block_start(addr);
1040   }
1041   return NULL;
1042 }
1043 
1044 size_t ShenandoahHeap::block_size(const HeapWord* addr) const {
1045   Space* sp = heap_region_containing(addr);
1046   assert(sp != NULL, "block_size of address outside of heap");
1047   return sp->block_size(addr);
1048 }
1049 
1050 bool ShenandoahHeap::block_is_obj(const HeapWord* addr) const {
1051   Space* sp = heap_region_containing(addr);
1052   return sp->block_is_obj(addr);
1053 }
1054 
1055 jlong ShenandoahHeap::millis_since_last_gc() {
1056   return heuristics()->time_since_last_gc() * 1000;
1057 }
1058 
1059 void ShenandoahHeap::prepare_for_verify() {
1060   if (SafepointSynchronize::is_at_safepoint()) {
1061     make_parsable(false);
1062   }
1063 }
1064 
1065 void ShenandoahHeap::print_gc_threads_on(outputStream* st) const {
1066   workers()->print_worker_threads_on(st);
1067   if (ShenandoahStringDedup::is_enabled()) {
1068     ShenandoahStringDedup::print_worker_threads_on(st);
1069   }
1070 }
1071 
1072 void ShenandoahHeap::gc_threads_do(ThreadClosure* tcl) const {
1073   workers()->threads_do(tcl);
1074   if (ShenandoahStringDedup::is_enabled()) {
1075     ShenandoahStringDedup::threads_do(tcl);
1076   }
1077 }
1078 
1079 void ShenandoahHeap::print_tracing_info() const {
1080   if (PrintGC || TraceGen0Time || TraceGen1Time) {
1081     ResourceMark rm;
1082     outputStream* out = gclog_or_tty;
1083     phase_timings()->print_on(out);
1084 
1085     out->cr();
1086     out->cr();
1087 
1088     shenandoah_policy()->print_gc_stats(out);
1089 
1090     out->cr();
1091     out->cr();
1092 
1093     if (ShenandoahPacing) {
1094       pacer()->print_on(out);
1095     }
1096 
1097     out->cr();
1098     out->cr();
1099 
1100     if (ShenandoahAllocationTrace) {
1101       assert(alloc_tracker() != NULL, "Must be");
1102       alloc_tracker()->print_on(out);
1103     } else {
1104       out->print_cr("  Allocation tracing is disabled, use -XX:+ShenandoahAllocationTrace to enable.");
1105     }
1106   }
1107 }
1108 
1109 void ShenandoahHeap::verify(bool silent, VerifyOption vo) {
1110   if (ShenandoahSafepoint::is_at_shenandoah_safepoint() || ! UseTLAB) {
1111     if (ShenandoahVerify) {
1112       verifier()->verify_generic(vo);
1113     } else {
1114       // TODO: Consider allocating verification bitmaps on demand,
1115       // and turn this on unconditionally.
1116     }
1117   }
1118 }
1119 size_t ShenandoahHeap::tlab_capacity(Thread *thr) const {
1120   return _free_set->capacity();
1121 }
1122 
1123 class ObjectIterateScanRootClosure : public ExtendedOopClosure {
1124 private:
1125   MarkBitMap* _bitmap;
1126   Stack<oop,mtGC>* _oop_stack;
1127 
1128   template <class T>
1129   void do_oop_work(T* p) {
1130     T o = oopDesc::load_heap_oop(p);
1131     if (!oopDesc::is_null(o)) {
1132       oop obj = oopDesc::decode_heap_oop_not_null(o);
1133       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1134       assert(obj->is_oop(), "must be a valid oop");
1135       if (!_bitmap->isMarked((HeapWord*) obj)) {
1136         _bitmap->mark((HeapWord*) obj);
1137         _oop_stack->push(obj);
1138       }
1139     }
1140   }
1141 public:
1142   ObjectIterateScanRootClosure(MarkBitMap* bitmap, Stack<oop,mtGC>* oop_stack) :
1143     _bitmap(bitmap), _oop_stack(oop_stack) {}
1144   void do_oop(oop* p)       { do_oop_work(p); }
1145   void do_oop(narrowOop* p) { do_oop_work(p); }
1146 };
1147 
1148 /*
1149  * This is public API, used in preparation of object_iterate().
1150  * Since we don't do a linear scan of the heap in object_iterate() (see comment below), we don't
1151  * need to make the heap parsable. For Shenandoah-internal linear heap scans that we can
1152  * control, we call SH::make_parsable().
1153  */
1154 void ShenandoahHeap::ensure_parsability(bool retire_tlabs) {
1155   // No-op.
1156 }
1157 
1158 /*
1159  * Iterates objects in the heap. This is public API, used for, e.g., heap dumping.
1160  *
1161  * We cannot safely iterate objects by doing a linear scan at random points in time. Linear
1162  * scanning needs to deal with dead objects, which may have dead Klass* pointers (e.g.
1163  * calling oopDesc::size() would crash) or dangling reference fields (crashes) etc. Linear
1164  * scanning therefore depends on having a valid marking bitmap to support it. However, we only
1165  * have a valid marking bitmap after successful marking. In particular, we *don't* have a valid
1166  * marking bitmap during marking, after aborted marking or during/after cleanup (when we just
1167  * wiped the bitmap in preparation for next marking).
1168  *
1169  * For all those reasons, we implement object iteration as a single marking traversal, reporting
1170  * objects as we mark+traverse through the heap, starting from GC roots. JVMTI IterateThroughHeap
1171  * is allowed to report dead objects, but is not required to do so.
1172  */
1173 void ShenandoahHeap::object_iterate(ObjectClosure* cl) {
1174   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1175   if (!os::commit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size(), false)) {
1176     log_warning(gc)("Could not commit native memory for auxiliary marking bitmap for heap iteration");
1177     return;
1178   }
1179 
1180   // Reset bitmap
1181   _aux_bit_map.clear();
1182 
1183   Stack<oop,mtGC> oop_stack;
1184 
1185   // First, we process all GC roots. This populates the work stack with initial objects.
1186   ShenandoahRootProcessor rp(this, 1, ShenandoahPhaseTimings::_num_phases);
1187   ObjectIterateScanRootClosure oops(&_aux_bit_map, &oop_stack);
1188   CLDToOopClosure clds(&oops, false);
1189   CodeBlobToOopClosure blobs(&oops, false);
1190   rp.process_all_roots(&oops, &oops, &clds, &blobs, NULL, 0);
1191 
1192   // Work through the oop stack to traverse heap.
1193   while (! oop_stack.is_empty()) {
1194     oop obj = oop_stack.pop();
1195     assert(obj->is_oop(), "must be a valid oop");
1196     cl->do_object(obj);
1197     obj->oop_iterate(&oops);
1198   }
1199 
1200   assert(oop_stack.is_empty(), "should be empty");
1201 
1202   if (!os::uncommit_memory((char*)_aux_bitmap_region.start(), _aux_bitmap_region.byte_size())) {
1203     log_warning(gc)("Could not uncommit native memory for auxiliary marking bitmap for heap iteration");
1204   }
1205 }
1206 
1207 void ShenandoahHeap::safe_object_iterate(ObjectClosure* cl) {
1208   assert(SafepointSynchronize::is_at_safepoint(), "safe iteration is only available during safepoints");
1209   object_iterate(cl);
1210 }
1211 
1212 void ShenandoahHeap::oop_iterate(ExtendedOopClosure* cl) {
1213   ObjectToOopClosure cl2(cl);
1214   object_iterate(&cl2);
1215 }
1216 
1217 class ShenandoahSpaceClosureRegionClosure: public ShenandoahHeapRegionClosure {
1218   SpaceClosure* _cl;
1219 public:
1220   ShenandoahSpaceClosureRegionClosure(SpaceClosure* cl) : _cl(cl) {}
1221   bool heap_region_do(ShenandoahHeapRegion* r) {
1222     _cl->do_space(r);
1223     return false;
1224   }
1225 };
1226 
1227 void  ShenandoahHeap::space_iterate(SpaceClosure* cl) {
1228   ShenandoahSpaceClosureRegionClosure blk(cl);
1229   heap_region_iterate(&blk);
1230 }
1231 
1232 Space*  ShenandoahHeap::space_containing(const void* oop) const {
1233   Space* res = heap_region_containing(oop);
1234   return res;
1235 }
1236 
1237 void  ShenandoahHeap::gc_prologue(bool b) {
1238   Unimplemented();
1239 }
1240 
1241 void  ShenandoahHeap::gc_epilogue(bool b) {
1242   Unimplemented();
1243 }
1244 
1245 // Apply blk->heap_region_do() on all committed regions in address order,
1246 // terminating the iteration early if heap_region_do() returns true.
1247 void ShenandoahHeap::heap_region_iterate(ShenandoahHeapRegionClosure* blk, bool skip_cset_regions, bool skip_humongous_continuation) const {
1248   for (size_t i = 0; i < num_regions(); i++) {
1249     ShenandoahHeapRegion* current  = get_region(i);
1250     if (skip_humongous_continuation && current->is_humongous_continuation()) {
1251       continue;
1252     }
1253     if (skip_cset_regions && current->is_cset()) {
1254       continue;
1255     }
1256     if (blk->heap_region_do(current)) {
1257       return;
1258     }
1259   }
1260 }
1261 
1262 class ShenandoahClearLivenessClosure : public ShenandoahHeapRegionClosure {
1263 private:
1264   ShenandoahHeap* sh;
1265 public:
1266   ShenandoahClearLivenessClosure(ShenandoahHeap* heap) : sh(heap) {}
1267 
1268   bool heap_region_do(ShenandoahHeapRegion* r) {
1269     r->clear_live_data();
1270     sh->marking_context()->capture_top_at_mark_start(r);
1271     return false;
1272   }
1273 };
1274 
1275 
1276 void ShenandoahHeap::op_init_mark() {
1277   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1278   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
1279 
1280   assert(marking_context()->is_bitmap_clear(), "need clear marking bitmap");
1281   assert(!marking_context()->is_complete(), "should not be complete");
1282 
1283   if (ShenandoahVerify) {
1284     verifier()->verify_before_concmark();
1285   }
1286 
1287   {
1288     ShenandoahGCPhase phase(ShenandoahPhaseTimings::accumulate_stats);
1289     accumulate_statistics_tlabs();
1290   }
1291 
1292   set_concurrent_mark_in_progress(true);
1293   // We need to reset all TLABs because we'd lose marks on all objects allocated in them.
1294   if (UseTLAB) {
1295     ShenandoahGCPhase phase(ShenandoahPhaseTimings::make_parsable);
1296     make_parsable(true);
1297   }
1298 
1299   {
1300     ShenandoahGCPhase phase(ShenandoahPhaseTimings::clear_liveness);
1301     ShenandoahClearLivenessClosure clc(this);
1302     heap_region_iterate(&clc);
1303   }
1304 
1305   // Make above changes visible to worker threads
1306   OrderAccess::fence();
1307 
1308   concurrent_mark()->mark_roots(ShenandoahPhaseTimings::scan_roots);
1309 
1310   if (UseTLAB) {
1311     ShenandoahGCPhase phase(ShenandoahPhaseTimings::resize_tlabs);
1312     resize_tlabs();
1313   }
1314 
1315   if (ShenandoahPacing) {
1316     pacer()->setup_for_mark();
1317   }
1318 }
1319 
1320 void ShenandoahHeap::op_mark() {
1321   concurrent_mark()->mark_from_roots();
1322 }
1323 
1324 void ShenandoahHeap::op_final_mark() {
1325   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1326 
1327   // It is critical that we evacuate roots right after finishing marking,
1328   // so that we don't get unmarked objects in the roots.
1330 
1331   if (!cancelled_gc()) {
1332     concurrent_mark()->finish_mark_from_roots(/* full_gc = */ false);
1333 
1334     TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->reset_taskqueue_stats());
1335 
1336     if (has_forwarded_objects()) {
1337       concurrent_mark()->update_roots(ShenandoahPhaseTimings::update_roots);
1338     }
1339 
1340     TASKQUEUE_STATS_ONLY(concurrent_mark()->task_queues()->print_taskqueue_stats());
1341 
1342     stop_concurrent_marking();
1343 
1344     {
1345       ShenandoahGCPhase phase(ShenandoahPhaseTimings::complete_liveness);
1346 
1347       // All allocations past TAMS are implicitly live, adjust the region data.
1348       // Bitmaps/TAMS are swapped at this point, so we need to poll complete bitmap.
1349       for (size_t i = 0; i < num_regions(); i++) {
1350         ShenandoahHeapRegion* r = get_region(i);
1351         if (!r->is_active()) continue;
1352 
1353         HeapWord* tams = complete_marking_context()->top_at_mark_start(r);
1354         HeapWord* top = r->top();
1355         if (top > tams) {
1356           r->increase_live_data_alloc_words(pointer_delta(top, tams));
1357         }
1358       }
1359     }
1360 
1361     {
1362       ShenandoahGCPhase prepare_evac(ShenandoahPhaseTimings::prepare_evac);
1363 
1364       make_parsable(true);
1365 
1366       if (ShenandoahVerify) {
1367         verifier()->verify_after_concmark();
1368       }
1369 
1370       trash_cset_regions();
1371 
1372       {
1373         ShenandoahHeapLocker locker(lock());
1374         _collection_set->clear();
1375         _free_set->clear();
1376 
1377         heuristics()->choose_collection_set(_collection_set);
1378         _free_set->rebuild();
1379       }
1380 
1381       if (ShenandoahVerify) {
1382         verifier()->verify_before_evacuation();
1383       }
1384     }
1385 
1386     // If collection set has candidates, start evacuation.
1387     // Otherwise, bypass the rest of the cycle.
1388     if (!collection_set()->is_empty()) {
1389       set_evacuation_in_progress(true);
1390       // From here on, we need to update references.
1391       set_has_forwarded_objects(true);
1392 
1393       ShenandoahGCPhase init_evac(ShenandoahPhaseTimings::init_evac);
1394       evacuate_and_update_roots();
1395     }
1396 
1397     if (ShenandoahPacing) {
1398       pacer()->setup_for_evac();
1399     }
1400   } else {
1401     concurrent_mark()->cancel();
1402     stop_concurrent_marking();
1403 
1404     if (process_references()) {
1405       // Abandon reference processing right away: pre-cleaning must have failed.
1406       ReferenceProcessor *rp = ref_processor();
1407       rp->disable_discovery();
1408       rp->abandon_partial_discovery();
1409       rp->verify_no_references_recorded();
1410     }
1411   }
1412 }
1413 
1414 void ShenandoahHeap::op_final_evac() {
1415   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should be at safepoint");
1416 
1417   set_evacuation_in_progress(false);
1418   if (ShenandoahVerify) {
1419     verifier()->verify_after_evacuation();
1420   }
1421 }
1422 
1423 void ShenandoahHeap::op_conc_evac() {
1424   ShenandoahConcurrentEvacuationTask task(this, _collection_set, true);
1425   workers()->run_task(&task);
1426 }
1427 
1428 void ShenandoahHeap::op_stw_evac() {
1429   ShenandoahConcurrentEvacuationTask task(this, _collection_set, false);
1430   workers()->run_task(&task);
1431 }
1432 
1433 void ShenandoahHeap::op_updaterefs() {
1434   update_heap_references(true);
1435 }
1436 
1437 void ShenandoahHeap::op_cleanup() {
1438   free_set()->recycle_trash();
1439 }
1440 
1441 void ShenandoahHeap::op_reset() {
1442   reset_mark_bitmap();
1443 }
1444 
1445 void ShenandoahHeap::op_preclean() {
1446   concurrent_mark()->preclean_weak_refs();
1447 }
1448 
1449 void ShenandoahHeap::op_full(GCCause::Cause cause) {
1450   ShenandoahMetricsSnapshot metrics;
1451   metrics.snap_before();
1452 
1453   full_gc()->do_it(cause);
1454 
1455   metrics.snap_after();
1456   metrics.print();
1457 
1458   if (metrics.is_good_progress("Full GC")) {
1459     _progress_last_gc.set();
1460   } else {
1461     // Nothing to do. Tell the allocation path that we have failed to make
1462     // progress, and it can finally fail.
1463     _progress_last_gc.unset();
1464   }
1465 }
1466 
1467 void ShenandoahHeap::op_degenerated(ShenandoahDegenPoint point) {
1468   // Degenerated GC is STW, but it can also fail. The current mechanics communicate
1469   // GC failure via the cancelled_gc() flag. So, if we detect the failure after
1470   // some phase, we have to upgrade the Degenerated GC to Full GC.
1471 
1472   clear_cancelled_gc();
1473 
1474   ShenandoahMetricsSnapshot metrics;
1475   metrics.snap_before();
1476 
1477   switch (point) {
1478     // The cases below form a Duff's-device-like structure: they describe the actual GC cycle,
1479     // but enter it at different points, depending on which concurrent phase
1480     // degenerated.
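    // For example, degenerating at _degenerated_mark enters at op_final_mark() and then falls
    // through the evacuation and update-refs cases below, performing the remaining phases
    // stop-the-world.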
1481 
1482     case _degenerated_outside_cycle:
1483       // We have degenerated from outside the cycle, which means something is bad with
1484       // the heap, most probably heavy humongous fragmentation, or we are very low on free
1485       // space. It makes little sense to wait for Full GC to reclaim as much as it can, when
1486       // we can do the most aggressive degen cycle, which includes processing references and
1487       // class unloading, unless those features are explicitly disabled.
1488       //
1489       // Note that we can only do this for "outside-cycle" degens, otherwise we would risk
1490       // changing the cycle parameters mid-cycle during concurrent -> degenerated handover.
1491       set_process_references(ShenandoahRefProcFrequency != 0);
1492       set_unload_classes(ClassUnloading);
1493 
1494       op_reset();
1495 
1496       op_init_mark();
1497       if (cancelled_gc()) {
1498         op_degenerated_fail();
1499         return;
1500       }
1501 
1502     case _degenerated_mark:
1503       op_final_mark();
1504       if (cancelled_gc()) {
1505         op_degenerated_fail();
1506         return;
1507       }
1508 
1509       op_cleanup();
1510 
1511     case _degenerated_evac:
1512       // If the heuristics decided to do the cycle, this flag will be set,
1513       // and we can do evacuation. Otherwise, this is the shortcut cycle.
1514       if (is_evacuation_in_progress()) {
1515 
1516         // Degeneration under the oom-evac protocol might have left some objects in
1517         // the collection set un-evacuated. Restart evacuation from the beginning to
1518         // capture all objects. For objects that are already evacuated, the pass reduces
1519         // to a quick forwarding check, which is supposed to be fast. This is also
1520         // safe to do even without degeneration, as the CSet iterator is at the beginning
1521         // in preparation for evacuation anyway.
1522         collection_set()->clear_current_index();
1523 
1524         op_stw_evac();
1525         if (cancelled_gc()) {
1526           op_degenerated_fail();
1527           return;
1528         }
1529       }
1530 
1531       // If the heuristics decided to do the cycle, this flag will be set,
1532       // and we need to do update-refs. Otherwise, this is the shortcut cycle.
1533       if (has_forwarded_objects()) {
1534         op_init_updaterefs();
1535         if (cancelled_gc()) {
1536           op_degenerated_fail();
1537           return;
1538         }
1539       }
1540 
1541     case _degenerated_updaterefs:
1542       if (has_forwarded_objects()) {
1543         op_final_updaterefs();
1544         if (cancelled_gc()) {
1545           op_degenerated_fail();
1546           return;
1547         }
1548       }
1549 
1550       op_cleanup();
1551       break;
1552 
1553     default:
1554       ShouldNotReachHere();
1555   }
1556 
1557   if (ShenandoahVerify) {
1558     verifier()->verify_after_degenerated();
1559   }
1560 
1561   metrics.snap_after();
1562   metrics.print();
1563 
1564   // Check for futility and fail. There is no reason to do several back-to-back Degenerated cycles,
1565   // because that probably means the heap is overloaded and/or fragmented.
1566   if (!metrics.is_good_progress("Degenerated GC")) {
1567     _progress_last_gc.unset();
1568     cancel_gc(GCCause::_shenandoah_upgrade_to_full_gc);
1569     op_degenerated_futile();
1570   } else {
1571     _progress_last_gc.set();
1572   }
1573 }
1574 
1575 void ShenandoahHeap::op_degenerated_fail() {
1576   log_info(gc)("Cannot finish degeneration, upgrading to Full GC");
1577   shenandoah_policy()->record_degenerated_upgrade_to_full();
1578   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1579 }
1580 
1581 void ShenandoahHeap::op_degenerated_futile() {
1582   shenandoah_policy()->record_degenerated_upgrade_to_full();
1583   op_full(GCCause::_shenandoah_upgrade_to_full_gc);
1584 }
1585 
1586 void ShenandoahHeap::stop_concurrent_marking() {
1587   assert(is_concurrent_mark_in_progress(), "How else could we get here?");
1588   if (!cancelled_gc()) {
1589     // Marking completed without cancellation: references were updated during marking where
1590     // needed, so drop the forwarded-objects flag and publish the complete marking context.
1591     set_has_forwarded_objects(false);
1592     mark_complete_marking_context();
1593   }
1594   set_concurrent_mark_in_progress(false);
1595 }
1596 
1597 void ShenandoahHeap::force_satb_flush_all_threads() {
1598   if (!is_concurrent_mark_in_progress()) {
1599     // No need to flush SATBs
1600     return;
1601   }
1602 
1603   // Do not block if Threads lock is busy. This avoids the potential deadlock
1604   // when this code is called from the periodic task, and something else is
1605   // expecting the periodic task to complete without blocking. On the off-chance
1606   // Threads lock is busy momentarily, try to acquire several times.
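  // (Bounded retry: at most 10 attempts with 1 ms naps, i.e. roughly 10 ms worst case before
  // giving up; since the caller is a periodic task, a skipped flush will be retried later.)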
1607   for (int t = 0; t < 10; t++) {
1608     if (Threads_lock->try_lock()) {
1609       JavaThread::set_force_satb_flush_all_threads(true);
1610       Threads_lock->unlock();
1611 
1612       // The threads are not "acquiring" their thread-local data, but it does not
1613       // hurt to "release" the updates here anyway.
1614       OrderAccess::fence();
1615       break;
1616     }
1617     os::naked_short_sleep(1);
1618   }
1619 }
1620 
1621 
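// The GC state is a bitmask (HAS_FORWARDED, MARKING, EVACUATION, UPDATEREFS) that is mirrored
// into every Java thread, so barriers can test the current phase with a cheap thread-local
// load. It only changes at Shenandoah safepoints.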
1622 void ShenandoahHeap::set_gc_state_mask(uint mask, bool value) {
1623   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Should really be Shenandoah safepoint");
1624   _gc_state.set_cond(mask, value);
1625   JavaThread::set_gc_state_all_threads(_gc_state.raw_value());
1626 }
1627 
1628 void ShenandoahHeap::set_concurrent_mark_in_progress(bool in_progress) {
1629   set_gc_state_mask(MARKING, in_progress);
1630   JavaThread::satb_mark_queue_set().set_active_all_threads(in_progress, !in_progress);
1631 }
1632 
1633 void ShenandoahHeap::set_evacuation_in_progress(bool in_progress) {
1634   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Only call this at safepoint");
1635   set_gc_state_mask(EVACUATION, in_progress);
1636 }
1637 
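// Each Shenandoah object is preceded by a Brooks forwarding pointer. TLAB allocation therefore
// reserves BrooksPointer::word_size() extra words, initializes the forwarding pointer, and
// returns the address just past it as the actual object start.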
1638 HeapWord* ShenandoahHeap::tlab_post_allocation_setup(HeapWord* obj) {
1639   // Initialize Brooks pointer for the next object
1640   HeapWord* result = obj + BrooksPointer::word_size();
1641   BrooksPointer::initialize(oop(result));
1642   return result;
1643 }
1644 
1645 uint ShenandoahHeap::oop_extra_words() {
1646   return BrooksPointer::word_size();
1647 }
1648 
1649 ShenandoahForwardedIsAliveClosure::ShenandoahForwardedIsAliveClosure() :
1650   _mark_context(ShenandoahHeap::heap()->marking_context()) {
1651 }
1652 
1653 ShenandoahIsAliveClosure::ShenandoahIsAliveClosure() :
1654   _mark_context(ShenandoahHeap::heap()->marking_context()) {
1655 }
1656 
1657 bool ShenandoahForwardedIsAliveClosure::do_object_b(oop obj) {
1658   if (oopDesc::is_null(obj)) {
1659     return false;
1660   }
1661   obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
1662   shenandoah_assert_not_forwarded_if(NULL, obj, ShenandoahHeap::heap()->is_concurrent_mark_in_progress());
1663   return _mark_context->is_marked(obj);
1664 }
1665 
1666 bool ShenandoahIsAliveClosure::do_object_b(oop obj) {
1667   if (oopDesc::is_null(obj)) {
1668     return false;
1669   }
1670   shenandoah_assert_not_forwarded(NULL, obj);
1671   return _mark_context->is_marked(obj);
1672 }
1673 
1674 void ShenandoahHeap::ref_processing_init() {
1675   MemRegion mr = reserved_region();
1676 
1677   assert(_max_workers > 0, "Sanity");
1678 
1679   _ref_processor =
1680     new ReferenceProcessor(mr,    // span
1681                            ParallelRefProcEnabled,  // MT processing
1682                            _max_workers,            // Degree of MT processing
1683                            true,                    // MT discovery
1684                            _max_workers,            // Degree of MT discovery
1685                            false,                   // Reference discovery is not atomic
1686                            NULL);                   // No closure, should be installed before use
1687 
1688   shenandoah_assert_rp_isalive_not_installed();
1689 }
1690 
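// The java.lang.ref pending list lock is a Java-level monitor that VM threads must not take
// directly; these helpers delegate to the surrogate locker thread owned by the control thread.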
1691 void ShenandoahHeap::acquire_pending_refs_lock() {
1692   _control_thread->slt()->manipulatePLL(SurrogateLockerThread::acquirePLL);
1693 }
1694 
1695 void ShenandoahHeap::release_pending_refs_lock() {
1696   _control_thread->slt()->manipulatePLL(SurrogateLockerThread::releaseAndNotifyPLL);
1697 }
1698 
1699 GCTracer* ShenandoahHeap::tracer() {
1700   return shenandoah_policy()->tracer();
1701 }
1702 
1703 size_t ShenandoahHeap::tlab_used(Thread* thread) const {
1704   return _free_set->used();
1705 }
1706 
1707 void ShenandoahHeap::cancel_gc(GCCause::Cause cause) {
1708   if (try_cancel_gc()) {
1709     FormatBuffer<> msg("Cancelling GC: %s", GCCause::to_string(cause));
1710     log_info(gc)("%s", msg.buffer());
1711     Events::log(Thread::current(), "%s", msg.buffer());
1712   }
1713 }
1714 
1715 uint ShenandoahHeap::max_workers() {
1716   return _max_workers;
1717 }
1718 
1719 void ShenandoahHeap::stop() {
1720   // The shutdown sequence should be able to complete even while a GC cycle is running.
1721 
1722   // Step 0. Notify policy to disable event recording.
1723   _shenandoah_policy->record_shutdown();
1724 
1725   // Step 1. Notify control thread that we are in shutdown.
1726   // Note that we cannot do that with stop(), because stop() is blocking and waits for the actual shutdown.
1727   // Doing stop() here would wait for the normal GC cycle to complete, never falling through to cancel below.
1728   _control_thread->prepare_for_graceful_shutdown();
1729 
1730   // Step 2. Notify GC workers that we are cancelling GC.
1731   cancel_gc(GCCause::_shenandoah_stop_vm);
1732 
1733   // Step 3. Wait until the control thread exits normally.
1734   _control_thread->stop();
1735 
1736   // Step 4. Stop String Dedup thread if it is active
1737   if (ShenandoahStringDedup::is_enabled()) {
1738     ShenandoahStringDedup::stop();
1739   }
1740 }
1741 
1742 void ShenandoahHeap::unload_classes_and_cleanup_tables(bool full_gc) {
1743   assert(ClassUnloading || full_gc, "Class unloading should be enabled");
1744 
1745   ShenandoahGCPhase root_phase(full_gc ?
1746                                ShenandoahPhaseTimings::full_gc_purge :
1747                                ShenandoahPhaseTimings::purge);
1748 
1749   ShenandoahIsAliveSelector alive;
1750   BoolObjectClosure* is_alive = alive.is_alive_closure();
1751 
1752   bool purged_class;
1753 
1754   // Unload classes and purge SystemDictionary.
1755   {
1756     ShenandoahGCPhase phase(full_gc ?
1757                             ShenandoahPhaseTimings::full_gc_purge_class_unload :
1758                             ShenandoahPhaseTimings::purge_class_unload);
1759     purged_class = SystemDictionary::do_unloading(is_alive,
1760                                                   full_gc /* do_cleaning*/ );
1761   }
1762 
1763   {
1764     ShenandoahGCPhase phase(full_gc ?
1765                             ShenandoahPhaseTimings::full_gc_purge_par :
1766                             ShenandoahPhaseTimings::purge_par);
1767     uint active = _workers->active_workers();
1768     ParallelCleaningTask unlink_task(is_alive, true, true, active, purged_class);
1769     _workers->run_task(&unlink_task);
1770   }
1771 
1772   if (ShenandoahStringDedup::is_enabled()) {
1773     ShenandoahGCPhase phase(full_gc ?
1774                             ShenandoahPhaseTimings::full_gc_purge_string_dedup :
1775                             ShenandoahPhaseTimings::purge_string_dedup);
1776     ShenandoahStringDedup::parallel_cleanup();
1777   }
1778 
1779   {
1780     ShenandoahGCPhase phase(full_gc ?
1781                             ShenandoahPhaseTimings::full_gc_purge_cldg :
1782                             ShenandoahPhaseTimings::purge_cldg);
1783     ClassLoaderDataGraph::purge();
1784   }
1785 }
1786 
1787 void ShenandoahHeap::set_has_forwarded_objects(bool cond) {
1788   set_gc_state_mask(HAS_FORWARDED, cond);
1789 }
1790 
1791 void ShenandoahHeap::set_process_references(bool pr) {
1792   _process_references.set_cond(pr);
1793 }
1794 
1795 void ShenandoahHeap::set_unload_classes(bool uc) {
1796   _unload_classes.set_cond(uc);
1797 }
1798 
1799 bool ShenandoahHeap::process_references() const {
1800   return _process_references.is_set();
1801 }
1802 
1803 bool ShenandoahHeap::unload_classes() const {
1804   return _unload_classes.is_set();
1805 }
1806 
1807 address ShenandoahHeap::in_cset_fast_test_addr() {
1808   ShenandoahHeap* heap = ShenandoahHeap::heap();
1809   assert(heap->collection_set() != NULL, "Sanity");
1810   return (address) heap->collection_set()->biased_map_address();
1811 }
1812 
1813 address ShenandoahHeap::cancelled_gc_addr() {
1814   return (address) ShenandoahHeap::heap()->_cancelled_gc.addr_of();
1815 }
1816 
1817 address ShenandoahHeap::gc_state_addr() {
1818   return (address) ShenandoahHeap::heap()->_gc_state.addr_of();
1819 }
1820 
1821 size_t ShenandoahHeap::conservative_max_heap_alignment() {
1822   size_t align = ShenandoahMaxRegionSize;
1823   if (UseLargePages) {
1824     align = MAX2(align, os::large_page_size());
1825   }
1826   return align;
1827 }
1828 
1829 size_t ShenandoahHeap::bytes_allocated_since_gc_start() {
1830   return OrderAccess::load_acquire(&_bytes_allocated_since_gc_start);
1831 }
1832 
1833 void ShenandoahHeap::reset_bytes_allocated_since_gc_start() {
1834   OrderAccess::release_store_fence(&_bytes_allocated_since_gc_start, (size_t)0);
1835 }
1836 
1837 void ShenandoahHeap::set_degenerated_gc_in_progress(bool in_progress) {
1838   _degenerated_gc_in_progress.set_cond(in_progress);
1839 }
1840 
1841 void ShenandoahHeap::set_full_gc_in_progress(bool in_progress) {
1842   _full_gc_in_progress.set_cond(in_progress);
1843 }
1844 
1845 void ShenandoahHeap::set_full_gc_move_in_progress(bool in_progress) {
1846   assert (is_full_gc_in_progress(), "should be");
1847   _full_gc_move_in_progress.set_cond(in_progress);
1848 }
1849 
1850 void ShenandoahHeap::set_update_refs_in_progress(bool in_progress) {
1851   set_gc_state_mask(UPDATEREFS, in_progress);
1852 }
1853 
1854 void ShenandoahHeap::register_nmethod(nmethod* nm) {
1855   ShenandoahCodeRoots::add_nmethod(nm);
1856 }
1857 
1858 void ShenandoahHeap::unregister_nmethod(nmethod* nm) {
1859   ShenandoahCodeRoots::remove_nmethod(nm);
1860 }
1861 
1862 oop ShenandoahHeap::pin_object(JavaThread* thr, oop o) {
1863   o = barrier_set()->write_barrier(o);
1864   ShenandoahHeapLocker locker(lock());
1865   heap_region_containing(o)->make_pinned();
1866   return o;
1867 }
1868 
1869 void ShenandoahHeap::unpin_object(JavaThread* thr, oop o) {
1870   o = barrier_set()->read_barrier(o);
1871   ShenandoahHeapLocker locker(lock());
1872   heap_region_containing(o)->make_unpinned();
1873 }
1874 
1875 GCTimer* ShenandoahHeap::gc_timer() const {
1876   return _gc_timer;
1877 }
1878 
1879 #ifdef ASSERT
1880 void ShenandoahHeap::assert_gc_workers(uint nworkers) {
1881   assert(nworkers > 0 && nworkers <= max_workers(), "Sanity");
1882 
1883   if (ShenandoahSafepoint::is_at_shenandoah_safepoint()) {
1884     if (UseDynamicNumberOfGCThreads ||
1885         (FLAG_IS_DEFAULT(ParallelGCThreads) && ForceDynamicNumberOfGCThreads)) {
1886       assert(nworkers <= ParallelGCThreads, "Cannot use more than it has");
1887     } else {
1888       // Use ParallelGCThreads inside safepoints
1889       assert(nworkers == ParallelGCThreads, "Use ParallelGCThreads within safepoints");
1890     }
1891   } else {
1892     if (UseDynamicNumberOfGCThreads ||
1893         (FLAG_IS_DEFAULT(ConcGCThreads) && ForceDynamicNumberOfGCThreads)) {
1894       assert(nworkers <= ConcGCThreads, "Cannot use more than it has");
1895     } else {
1896       // Use ConcGCThreads outside safepoints
1897       assert(nworkers == ConcGCThreads, "Use ConcGCThreads outside safepoints");
1898     }
1899   }
1900 }
1901 #endif
1902 
1903 ShenandoahVerifier* ShenandoahHeap::verifier() {
1904   guarantee(ShenandoahVerify, "Should be enabled");
1905   assert (_verifier != NULL, "sanity");
1906   return _verifier;
1907 }
1908 
1909 ShenandoahUpdateHeapRefsClosure::ShenandoahUpdateHeapRefsClosure() :
1910   _heap(ShenandoahHeap::heap()) {}
1911 
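// Parallel update-refs task: workers claim regions from a shared iterator and walk live objects
// up to the safe limit recorded at init-update-refs, updating any references that still point
// to forwarded objects. Collection set regions are skipped, since their contents are evacuated
// rather than updated in place.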
1912 class ShenandoahUpdateHeapRefsTask : public AbstractGangTask {
1913 private:
1914   ShenandoahHeap* _heap;
1915   ShenandoahRegionIterator* _regions;
1916   bool _concurrent;
1917 
1918 public:
1919   ShenandoahUpdateHeapRefsTask(ShenandoahRegionIterator* regions, bool concurrent) :
1920     AbstractGangTask("Concurrent Update References Task"),
1921     _heap(ShenandoahHeap::heap()),
1922     _regions(regions),
1923     _concurrent(concurrent) {
1924   }
1925 
1926   void work(uint worker_id) {
1927     ShenandoahWorkerSession worker_session(worker_id);
1928     ShenandoahSuspendibleThreadSetJoiner stsj(_concurrent && ShenandoahSuspendibleWorkers);
1929     ShenandoahUpdateHeapRefsClosure cl;
1930     ShenandoahHeapRegion* r = _regions->next();
1931     ShenandoahMarkingContext* const ctx = _heap->complete_marking_context();
1932     while (r != NULL) {
1933       HeapWord* top_at_start_ur = r->concurrent_iteration_safe_limit();
1934       assert (top_at_start_ur >= r->bottom(), "sanity");
1935       if (r->is_active() && !r->is_cset()) {
1936         _heap->marked_object_oop_iterate(r, &cl, top_at_start_ur);
1937       }
1938       if (ShenandoahPacing) {
1939         _heap->pacer()->report_updaterefs(pointer_delta(top_at_start_ur, r->bottom()));
1940       }
1941       if (_heap->check_cancelled_gc_and_yield(_concurrent)) {
1942         return;
1943       }
1944       r = _regions->next();
1945     }
1946   }
1947 };
1948 
1949 void ShenandoahHeap::update_heap_references(bool concurrent) {
1950   ShenandoahUpdateHeapRefsTask task(&_update_refs_iterator, concurrent);
1951   workers()->run_task(&task);
1952 }
1953 
1954 void ShenandoahHeap::op_init_updaterefs() {
1955   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1956 
1957   set_evacuation_in_progress(false);
1958 
1959   if (ShenandoahVerify) {
1960     verifier()->verify_before_updaterefs();
1961   }
1962 
1963   set_update_refs_in_progress(true);
1964   make_parsable(true);
1965   for (uint i = 0; i < num_regions(); i++) {
1966     ShenandoahHeapRegion* r = get_region(i);
1967     r->set_concurrent_iteration_safe_limit(r->top());
1968   }
1969 
1970   // Reset iterator.
1971   _update_refs_iterator.reset();
1972 
1973   if (ShenandoahPacing) {
1974     pacer()->setup_for_updaterefs();
1975   }
1976 }
1977 
1978 void ShenandoahHeap::op_final_updaterefs() {
1979   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at safepoint");
1980 
1981   // Check if there is left-over work, and finish it
1982   if (_update_refs_iterator.has_next()) {
1983     ShenandoahGCPhase final_work(ShenandoahPhaseTimings::final_update_refs_finish_work);
1984 
1985     // Finish updating references where we left off.
1986     clear_cancelled_gc();
1987     update_heap_references(false);
1988   }
1989 
1990   // Clear the cancelled-GC flag, if set. On the cancellation path, the block above has
1991   // already handled everything; on degenerated paths, the flag would not be set anyway.
1992   if (cancelled_gc()) {
1993     clear_cancelled_gc();
1994   }
1995   assert(!cancelled_gc(), "Should have been done right before");
1996 
1997   concurrent_mark()->update_roots(is_degenerated_gc_in_progress() ?
1998                                  ShenandoahPhaseTimings::degen_gc_update_roots:
1999                                  ShenandoahPhaseTimings::final_update_refs_roots);
2000 
2001   ShenandoahGCPhase final_update_refs(ShenandoahPhaseTimings::final_update_refs_recycle);
2002 
2003   trash_cset_regions();
2004   set_has_forwarded_objects(false);
2005   set_update_refs_in_progress(false);
2006 
2007   if (ShenandoahVerify) {
2008     verifier()->verify_after_updaterefs();
2009   }
2010 
2011   {
2012     ShenandoahHeapLocker locker(lock());
2013     _free_set->rebuild();
2014   }
2015 }
2016 
2017 #ifdef ASSERT
2018 void ShenandoahHeap::assert_heaplock_not_owned_by_current_thread() {
2019   _lock.assert_not_owned_by_current_thread();
2020 }
2021 
2022 void ShenandoahHeap::assert_heaplock_owned_by_current_thread() {
2023   _lock.assert_owned_by_current_thread();
2024 }
2025 
2026 void ShenandoahHeap::assert_heaplock_or_safepoint() {
2027   _lock.assert_owned_by_current_thread_or_safepoint();
2028 }
2029 #endif
2030 
2031 void ShenandoahHeap::print_extended_on(outputStream *st) const {
2032   print_on(st);
2033   print_heap_regions_on(st);
2034 }
2035 
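// The marking bitmap is committed in slices, each covering _bitmap_regions_per_slice heap
// regions and _bitmap_bytes_per_slice bytes of bitmap. A slice may only be uncommitted once no
// region it covers is committed. Illustrative example (hypothetical sizing): with 8 regions per
// slice, region 13 maps to slice 13 / 8 = 1, which covers regions 8..15.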
2036 bool ShenandoahHeap::is_bitmap_slice_committed(ShenandoahHeapRegion* r, bool skip_self) {
2037   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2038 
2039   size_t regions_from = _bitmap_regions_per_slice * slice;
2040   size_t regions_to   = MIN2(num_regions(), _bitmap_regions_per_slice * (slice + 1));
2041   for (size_t g = regions_from; g < regions_to; g++) {
2042     assert (g / _bitmap_regions_per_slice == slice, "same slice");
2043     if (skip_self && g == r->region_number()) continue;
2044     if (get_region(g)->is_committed()) {
2045       return true;
2046     }
2047   }
2048   return false;
2049 }
2050 
2051 bool ShenandoahHeap::commit_bitmap_slice(ShenandoahHeapRegion* r) {
2052   assert_heaplock_owned_by_current_thread();
2053 
2054   if (is_bitmap_slice_committed(r, true)) {
2055     // Some other region from the group is already committed, meaning the bitmap
2056     // slice is already committed; exit right away.
2057     return true;
2058   }
2059 
2060   // Commit the bitmap slice:
2061   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2062   size_t off = _bitmap_bytes_per_slice * slice;
2063   size_t len = _bitmap_bytes_per_slice;
2064   if (!os::commit_memory((char*)_bitmap_region.start() + off, len, false)) {
2065     return false;
2066   }
2067   return true;
2068 }
2069 
2070 bool ShenandoahHeap::uncommit_bitmap_slice(ShenandoahHeapRegion *r) {
2071   assert_heaplock_owned_by_current_thread();
2072 
2073   if (is_bitmap_slice_committed(r, true)) {
2074     // Some other region from the group is still committed, meaning the bitmap
2075     // slice should stay committed; exit right away.
2076     return true;
2077   }
2078 
2079   // Uncommit the bitmap slice:
2080   size_t slice = r->region_number() / _bitmap_regions_per_slice;
2081   size_t off = _bitmap_bytes_per_slice * slice;
2082   size_t len = _bitmap_bytes_per_slice;
2083   if (!os::uncommit_memory((char*)_bitmap_region.start() + off, len)) {
2084     return false;
2085   }
2086   return true;
2087 }
2088 
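// The vmop_entry_* methods run on the requesting thread (normally the control thread): they
// account the "gross" pause, which includes safepoint synchronization, and schedule a VM
// operation. That operation calls the matching entry_* method at the safepoint, which accounts
// the net pause, sets up logging and workers, and performs the actual work via op_*.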
2089 void ShenandoahHeap::vmop_entry_init_mark() {
2090   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2091   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2092   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark_gross);
2093 
2094   try_inject_alloc_failure();
2095   VM_ShenandoahInitMark op;
2096   VMThread::execute(&op); // jump to entry_init_mark() under safepoint
2097 }
2098 
2099 void ShenandoahHeap::vmop_entry_final_mark() {
2100   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2101   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2102   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark_gross);
2103 
2104   try_inject_alloc_failure();
2105   VM_ShenandoahFinalMarkStartEvac op;
2106   VMThread::execute(&op); // jump to entry_final_mark under safepoint
2107 }
2108 
2109 void ShenandoahHeap::vmop_entry_final_evac() {
2110   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2111   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2112   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac_gross);
2113 
2114   VM_ShenandoahFinalEvac op;
2115   VMThread::execute(&op); // jump to entry_final_evac under safepoint
2116 }
2117 
2118 void ShenandoahHeap::vmop_entry_init_updaterefs() {
2119   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2120   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2121   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs_gross);
2122 
2123   try_inject_alloc_failure();
2124   VM_ShenandoahInitUpdateRefs op;
2125   VMThread::execute(&op);
2126 }
2127 
2128 void ShenandoahHeap::vmop_entry_final_updaterefs() {
2129   TraceCollectorStats tcs(monitoring_support()->stw_collection_counters());
2130   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2131   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs_gross);
2132 
2133   try_inject_alloc_failure();
2134   VM_ShenandoahFinalUpdateRefs op;
2135   VMThread::execute(&op);
2136 }
2137 
2138 void ShenandoahHeap::vmop_entry_full(GCCause::Cause cause) {
2139   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2140   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2141   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_gross);
2142 
2143   try_inject_alloc_failure();
2144   VM_ShenandoahFullGC op(cause);
2145   VMThread::execute(&op);
2146 }
2147 
2148 void ShenandoahHeap::vmop_degenerated(ShenandoahDegenPoint point) {
2149   TraceCollectorStats tcs(monitoring_support()->full_stw_collection_counters());
2150   ShenandoahGCPhase total(ShenandoahPhaseTimings::total_pause_gross);
2151   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc_gross);
2152 
2153   VM_ShenandoahDegeneratedGC degenerated_gc((int)point);
2154   VMThread::execute(&degenerated_gc);
2155 }
2156 
2157 void ShenandoahHeap::entry_init_mark() {
2158   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2159   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_mark);
2160 
2161   const char* msg = init_mark_event_message();
2162   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2163   EventMark em("%s", msg);
2164 
2165   ShenandoahWorkerScope scope(workers(),
2166                               ShenandoahWorkerPolicy::calc_workers_for_init_marking(),
2167                               "init marking");
2168 
2169   op_init_mark();
2170 }
2171 
2172 void ShenandoahHeap::entry_final_mark() {
2173   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2174   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_mark);
2175 
2176   const char* msg = final_mark_event_message();
2177   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2178   EventMark em("%s", msg);
2179 
2180   ShenandoahWorkerScope scope(workers(),
2181                               ShenandoahWorkerPolicy::calc_workers_for_final_marking(),
2182                               "final marking");
2183 
2184   op_final_mark();
2185 }
2186 
2187 void ShenandoahHeap::entry_final_evac() {
2188   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2189   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_evac);
2190 
2191   const char* msg = "Pause Final Evac";
2192   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2193   EventMark em("%s", msg);
2194 
2195   op_final_evac();
2196 }
2197 
2198 void ShenandoahHeap::entry_init_updaterefs() {
2199   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2200   ShenandoahGCPhase phase(ShenandoahPhaseTimings::init_update_refs);
2201 
2202   static const char* msg = "Pause Init Update Refs";
2203   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2204   EventMark em("%s", msg);
2205 
2206   // No workers used in this phase, no setup required
2207 
2208   op_init_updaterefs();
2209 }
2210 
2211 void ShenandoahHeap::entry_final_updaterefs() {
2212   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2213   ShenandoahGCPhase phase(ShenandoahPhaseTimings::final_update_refs);
2214 
2215   static const char* msg = "Pause Final Update Refs";
2216   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id());
2217   EventMark em("%s", msg);
2218 
2219   ShenandoahWorkerScope scope(workers(),
2220                               ShenandoahWorkerPolicy::calc_workers_for_final_update_ref(),
2221                               "final reference update");
2222 
2223   op_final_updaterefs();
2224 }
2225 
2226 void ShenandoahHeap::entry_full(GCCause::Cause cause) {
2227   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2228   ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc);
2229 
2230   static const char* msg = "Pause Full";
2231   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true);
2232   EventMark em("%s", msg);
2233 
2234   ShenandoahWorkerScope scope(workers(),
2235                               ShenandoahWorkerPolicy::calc_workers_for_fullgc(),
2236                               "full gc");
2237 
2238   op_full(cause);
2239 }
2240 
2241 void ShenandoahHeap::entry_degenerated(int point) {
2242   ShenandoahGCPhase total_phase(ShenandoahPhaseTimings::total_pause);
2243   ShenandoahGCPhase phase(ShenandoahPhaseTimings::degen_gc);
2244 
2245   ShenandoahDegenPoint dpoint = (ShenandoahDegenPoint)point;
2246   const char* msg = degen_event_message(dpoint);
2247   GCTraceTime time(msg, PrintGC, _gc_timer, tracer()->gc_id(), true);
2248   EventMark em("%s", msg);
2249 
2250   ShenandoahWorkerScope scope(workers(),
2251                               ShenandoahWorkerPolicy::calc_workers_for_stw_degenerated(),
2252                               "stw degenerated gc");
2253 
2254   set_degenerated_gc_in_progress(true);
2255   op_degenerated(dpoint);
2256   set_degenerated_gc_in_progress(false);
2257 }
2258 
2259 void ShenandoahHeap::entry_mark() {
2260   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2261 
2262   const char* msg = conc_mark_event_message();
2263   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2264   EventMark em("%s", msg);
2265 
2266   ShenandoahWorkerScope scope(workers(),
2267                               ShenandoahWorkerPolicy::calc_workers_for_conc_marking(),
2268                               "concurrent marking");
2269 
2270   try_inject_alloc_failure();
2271   op_mark();
2272 }
2273 
2274 void ShenandoahHeap::entry_evac() {
2275   ShenandoahGCPhase conc_evac_phase(ShenandoahPhaseTimings::conc_evac);
2276   TraceCollectorStats tcs(monitoring_support()->concurrent_collection_counters());
2277 
2278   static const char *msg = "Concurrent evacuation";
2279   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2280   EventMark em("%s", msg);
2281 
2282   ShenandoahWorkerScope scope(workers(),
2283                               ShenandoahWorkerPolicy::calc_workers_for_conc_evac(),
2284                               "concurrent evacuation");
2285 
2286   try_inject_alloc_failure();
2287   op_conc_evac();
2288 }
2289 
2290 void ShenandoahHeap::entry_updaterefs() {
2291   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_update_refs);
2292 
2293   static const char* msg = "Concurrent update references";
2294   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2295   EventMark em("%s", msg);
2296 
2297   ShenandoahWorkerScope scope(workers(),
2298                               ShenandoahWorkerPolicy::calc_workers_for_conc_update_ref(),
2299                               "concurrent reference update");
2300 
2301   try_inject_alloc_failure();
2302   op_updaterefs();
2303 }
2304 
2305 void ShenandoahHeap::entry_cleanup() {
2306   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_cleanup);
2307 
2308   static const char* msg = "Concurrent cleanup";
2309   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2310   EventMark em("%s", msg);
2311 
2312   // This phase does not use workers, no need for setup
2313 
2314   try_inject_alloc_failure();
2315   op_cleanup();
2316 }
2317 
2318 void ShenandoahHeap::entry_reset() {
2319   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_reset);
2320 
2321   static const char* msg = "Concurrent reset";
2322   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2323   EventMark em("%s", msg);
2324 
2325   ShenandoahWorkerScope scope(workers(),
2326                               ShenandoahWorkerPolicy::calc_workers_for_conc_reset(),
2327                               "concurrent reset");
2328 
2329   try_inject_alloc_failure();
2330   op_reset();
2331 }
2332 
2333 void ShenandoahHeap::entry_preclean() {
2334   if (ShenandoahPreclean && process_references()) {
2335     ShenandoahGCPhase conc_preclean(ShenandoahPhaseTimings::conc_preclean);
2336 
2337     static const char* msg = "Concurrent precleaning";
2338     GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2339     EventMark em("%s", msg);
2340 
2341     ShenandoahWorkerScope scope(workers(),
2342                                 ShenandoahWorkerPolicy::calc_workers_for_conc_preclean(),
2343                                 "concurrent preclean");
2344 
2345     try_inject_alloc_failure();
2346     op_preclean();
2347   }
2348 }
2349 
2350 void ShenandoahHeap::entry_uncommit(double shrink_before) {
2351   static const char *msg = "Concurrent uncommit";
2352   GCTraceTime time(msg, PrintGC, NULL, tracer()->gc_id(), true);
2353   EventMark em("%s", msg);
2354 
2355   ShenandoahGCPhase phase(ShenandoahPhaseTimings::conc_uncommit);
2356 
2357   op_uncommit(shrink_before);
2358 }
2359 
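// Test hook: with ShenandoahAllocFailureALot enabled, roughly 5% of eligible calls
// ((os::random() % 1000) > 950, i.e. 49 out of 1000) arm an injected allocation failure.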
2360 void ShenandoahHeap::try_inject_alloc_failure() {
2361   if (ShenandoahAllocFailureALot && !cancelled_gc() && ((os::random() % 1000) > 950)) {
2362     _inject_alloc_failure.set();
2363     os::naked_short_sleep(1);
2364     if (cancelled_gc()) {
2365       log_info(gc)("Allocation failure was successfully injected");
2366     }
2367   }
2368 }
2369 
2370 bool ShenandoahHeap::should_inject_alloc_failure() {
2371   return _inject_alloc_failure.is_set() && _inject_alloc_failure.try_unset();
2372 }
2373 
2374 void ShenandoahHeap::enter_evacuation() {
2375   _oom_evac_handler.enter_evacuation();
2376 }
2377 
2378 void ShenandoahHeap::leave_evacuation() {
2379   _oom_evac_handler.leave_evacuation();
2380 }
2381 
2382 ShenandoahRegionIterator::ShenandoahRegionIterator() :
2383   _index(0),
2384   _heap(ShenandoahHeap::heap()) {}
2385 
2386 ShenandoahRegionIterator::ShenandoahRegionIterator(ShenandoahHeap* heap) :
2387   _index(0),
2388   _heap(heap) {}
2389 
2390 void ShenandoahRegionIterator::reset() {
2391   _index = 0;
2392 }
2393 
2394 bool ShenandoahRegionIterator::has_next() const {
2395   return _index < (jint)_heap->num_regions();
2396 }
2397 
2398 char ShenandoahHeap::gc_state() {
2399   return _gc_state.raw_value();
2400 }
2401 
2402 const char* ShenandoahHeap::init_mark_event_message() const {
2403   bool update_refs = has_forwarded_objects();
2404   bool proc_refs = process_references();
2405   bool unload_cls = unload_classes();
2406 
2407   if (update_refs && proc_refs && unload_cls) {
2408     return "Pause Init Mark (update refs) (process weakrefs) (unload classes)";
2409   } else if (update_refs && proc_refs) {
2410     return "Pause Init Mark (update refs) (process weakrefs)";
2411   } else if (update_refs && unload_cls) {
2412     return "Pause Init Mark (update refs) (unload classes)";
2413   } else if (proc_refs && unload_cls) {
2414     return "Pause Init Mark (process weakrefs) (unload classes)";
2415   } else if (update_refs) {
2416     return "Pause Init Mark (update refs)";
2417   } else if (proc_refs) {
2418     return "Pause Init Mark (process weakrefs)";
2419   } else if (unload_cls) {
2420     return "Pause Init Mark (unload classes)";
2421   } else {
2422     return "Pause Init Mark";
2423   }
2424 }
2425 
2426 const char* ShenandoahHeap::final_mark_event_message() const {
2427   bool update_refs = has_forwarded_objects();
2428   bool proc_refs = process_references();
2429   bool unload_cls = unload_classes();
2430 
2431   if (update_refs && proc_refs && unload_cls) {
2432     return "Pause Final Mark (update refs) (process weakrefs) (unload classes)";
2433   } else if (update_refs && proc_refs) {
2434     return "Pause Final Mark (update refs) (process weakrefs)";
2435   } else if (update_refs && unload_cls) {
2436     return "Pause Final Mark (update refs) (unload classes)";
2437   } else if (proc_refs && unload_cls) {
2438     return "Pause Final Mark (process weakrefs) (unload classes)";
2439   } else if (update_refs) {
2440     return "Pause Final Mark (update refs)";
2441   } else if (proc_refs) {
2442     return "Pause Final Mark (process weakrefs)";
2443   } else if (unload_cls) {
2444     return "Pause Final Mark (unload classes)";
2445   } else {
2446     return "Pause Final Mark";
2447   }
2448 }
2449 
2450 const char* ShenandoahHeap::conc_mark_event_message() const {
2451   bool update_refs = has_forwarded_objects();
2452   bool proc_refs = process_references();
2453   bool unload_cls = unload_classes();
2454 
2455   if (update_refs && proc_refs && unload_cls) {
2456     return "Concurrent marking (update refs) (process weakrefs) (unload classes)";
2457   } else if (update_refs && proc_refs) {
2458     return "Concurrent marking (update refs) (process weakrefs)";
2459   } else if (update_refs && unload_cls) {
2460     return "Concurrent marking (update refs) (unload classes)";
2461   } else if (proc_refs && unload_cls) {
2462     return "Concurrent marking (process weakrefs) (unload classes)";
2463   } else if (update_refs) {
2464     return "Concurrent marking (update refs)";
2465   } else if (proc_refs) {
2466     return "Concurrent marking (process weakrefs)";
2467   } else if (unload_cls) {
2468     return "Concurrent marking (unload classes)";
2469   } else {
2470     return "Concurrent marking";
2471   }
2472 }
2473 
2474 const char* ShenandoahHeap::degen_event_message(ShenandoahDegenPoint point) const {
2475   switch (point) {
2476     case _degenerated_unset:
2477       return "Pause Degenerated GC (<UNSET>)";
2478     case _degenerated_outside_cycle:
2479       return "Pause Degenerated GC (Outside of Cycle)";
2480     case _degenerated_mark:
2481       return "Pause Degenerated GC (Mark)";
2482     case _degenerated_evac:
2483       return "Pause Degenerated GC (Evacuation)";
2484     case _degenerated_updaterefs:
2485       return "Pause Degenerated GC (Update Refs)";
2486     default:
2487       ShouldNotReachHere();
2488       return "ERROR";
2489   }
2490 }
2491 
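// Per-worker liveness caches: during marking each worker accumulates per-region live data in
// its own jushort array and flushes the totals into the regions afterwards, avoiding contended
// updates of the shared region counters for every marked object.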
2492 jushort* ShenandoahHeap::get_liveness_cache(uint worker_id) {
2493 #ifdef ASSERT
2494   assert(worker_id < _max_workers, "sanity");
2495   for (uint i = 0; i < num_regions(); i++) {
2496     assert(_liveness_cache[worker_id][i] == 0, "liveness cache should be empty");
2497   }
2498 #endif
2499   return _liveness_cache[worker_id];
2500 }
2501 
2502 void ShenandoahHeap::flush_liveness_cache(uint worker_id) {
2503   assert(worker_id < _max_workers, "sanity");
2504   jushort* ld = _liveness_cache[worker_id];
2505   for (uint i = 0; i < num_regions(); i++) {
2506     ShenandoahHeapRegion* r = get_region(i);
2507     jushort live = ld[i];
2508     if (live > 0) {
2509       r->increase_live_data_gc_words(live);
2510       ld[i] = 0;
2511     }
2512   }
2513 }
2514 
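// Selects the forwarded-aware is-alive closure while forwarded objects may exist, and the
// plain one otherwise.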
2515 BoolObjectClosure* ShenandoahIsAliveSelector::is_alive_closure() {
2516   return ShenandoahHeap::heap()->has_forwarded_objects() ? reinterpret_cast<BoolObjectClosure*>(&_fwd_alive_cl)
2517                                                          : reinterpret_cast<BoolObjectClosure*>(&_alive_cl);
2518 }