1 /*
   2  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shared/weakProcessor.inline.hpp"
  32 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  33 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  41 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  43 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  44 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  45 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  46 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  47 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  48 #include "gc/shenandoah/shenandoahUtils.hpp"
  49 #include "gc/shenandoah/shenandoahVerifier.hpp"
  50 
  51 #include "memory/iterator.hpp"
  52 #include "memory/metaspace.hpp"
  53 #include "memory/resourceArea.hpp"
  54 
  55 /**
  56  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  57  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  58  * is incremental-update-based.
  59  *
  60  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  61  * several reasons:
  62  * - We will not reclaim them in this cycle anyway, because they are not in the
  63  *   cset
  64  * - It makes up for the bulk of work during final-pause
  65  * - It also shortens the concurrent cycle because we don't need to
  66  *   pointlessly traverse through newly allocated objects.
  67  * - As a nice side-effect, it solves the I-U termination problem (mutators
  68  *   cannot outrun the GC by allocating like crazy)
  69  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  71  *   achieves the same, but without extra barriers. I think the effect of
  72  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  73  *   particular, we will not see the head of a completely new long linked list
  74  *   in final-pause and end up traversing huge chunks of the heap there.
  75  * - We don't need to see/update the fields of new objects either, because they
  76  *   are either still null, or anything that's been stored into them has been
  77  *   evacuated+enqueued before (and will thus be treated later).
  78  *
  79  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  81  *
  82  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  84  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  85  *   them for cset. This means that we need to protect such regions from
  86  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  87  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  88  *   code.
  89  * - We *need* to traverse through evacuated objects. Those objects are
  90  *   pre-existing, and any references in them point to interesting objects that
  91  *   we need to see. We also want to count them as live, because we just
  92  *   determined that they are alive :-) I achieve this by upping TAMS
  93  *   concurrently for every gclab/gc-shared alloc before publishing the
  94  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  96  */
  97 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  98 private:
  99   ShenandoahObjToScanQueue* _queue;
 100   ShenandoahTraversalGC* _traversal_gc;
 101   ShenandoahHeap* const _heap;
 102 
 103 public:
 104   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 105     _queue(q),
 106     _heap(ShenandoahHeap::heap())
 107  { }
 108 
 109   void do_buffer(void** buffer, size_t size) {
 110     for (size_t i = 0; i < size; ++i) {
 111       oop* p = (oop*) &buffer[i];
 112       oop obj = RawAccess<>::oop_load(p);
 113       shenandoah_assert_not_forwarded(p, obj);
 114       if (_heap->marking_context()->mark(obj)) {
 115         _queue->push(ShenandoahMarkTask(obj));
 116       }
 117     }
 118   }
 119 };
 120 
 121 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 122 private:
 123   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 124 
 125 public:
 126   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 127     _satb_cl(satb_cl) {}
 128 
 129   void do_thread(Thread* thread) {
 130     ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
 131   }
 132 };
 133 
 134 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 135 // and remark them later during final-traversal.
 136 class ShenandoahMarkCLDClosure : public CLDClosure {
 137 private:
 138   OopClosure* _cl;
 139 public:
 140   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 141   void do_cld(ClassLoaderData* cld) {
 142     cld->oops_do(_cl, true, true);
 143   }
 144 };
 145 
 146 // Like CLDToOopClosure, but only process modified CLDs
 147 class ShenandoahRemarkCLDClosure : public CLDClosure {
 148 private:
 149   OopClosure* _cl;
 150 public:
 151   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 152   void do_cld(ClassLoaderData* cld) {
 153     if (cld->has_modified_oops()) {
 154       cld->oops_do(_cl, true, true);
 155     }
 156   }
 157 };
 158 
// Worker task for the init-traversal pause: scans GC roots and seeds the
// traversal task queues. When class unloading is enabled, only strong roots
// are scanned, and code roots pointing into the collection set are
// pre-evacuated separately.
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahHeap* _heap;
  ShenandoahCsetCodeRootsIterator* _cset_coderoots;
public:
  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()),
    _cset_coderoots(cset_coderoots) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    // Traversal may evacuate; guard against evacuation OOM.
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    // Reference processor is only consulted when reference processing is on.
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      if (unload_classes) {
        _rp->process_strong_roots(&roots_cl, &cld_cl, NULL, NULL, worker_id);
        // Need to pre-evac code roots here. Otherwise we might see from-space constants.
        ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
        ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        _cset_coderoots->possibly_parallel_blobs_do(&code_cl);
      } else {
        // No unloading: scan all roots, including code roots, in one pass.
        _rp->process_all_roots(&roots_cl, &cld_cl, &code_cl, NULL, worker_id);
      }
      if (ShenandoahStringDedup::is_enabled()) {
        // String dedup table entries are also roots for traversal.
        AlwaysTrueClosure is_alive;
        ShenandoahStringDedup::parallel_oops_do(&is_alive, &roots_cl, worker_id);
      }
    }
  }
};
 206 
 207 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 208 private:
 209   ShenandoahTaskTerminator* _terminator;
 210   ShenandoahHeap* _heap;
 211 public:
 212   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
 213     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 214     _terminator(terminator),
 215     _heap(ShenandoahHeap::heap()) {}
 216 
 217   void work(uint worker_id) {
 218     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 219     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 220     ShenandoahEvacOOMScope oom_evac_scope;
 221     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 222 
 223     // Drain all outstanding work in queues.
 224     traversal_gc->main_loop(worker_id, _terminator, true);
 225   }
 226 };
 227 
// Worker task for the final-traversal pause: drains remaining SATB buffers,
// re-scans GC roots (with degenerated-GC variants when applicable), and
// finishes all outstanding traversal queue work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    if (!_heap->is_degenerated_gc_in_progress()) {
      // Normal cycle: use the regular traversal closure.
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      // The thread closure piggy-backs SATB buffer draining (see Step 0 note).
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Only remark CLDs that were modified since init-traversal.
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, &remark_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    } else {
      // Degenerated cycle: same structure, but with the degen closure variant.
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, &remark_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 3: Finally drain all outstanding work in queues.
      // sts_yield == false: this runs at a pause, no safepoint yielding.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
 303 
 304 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 305   _heap(heap),
 306   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 307   _traversal_set(ShenandoahHeapRegionSet()) {
 308 
 309   uint num_queues = heap->max_workers();
 310   for (uint i = 0; i < num_queues; ++i) {
 311     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 312     task_queue->initialize();
 313     _task_queues->register_queue(i, task_queue);
 314   }
 315 }
 316 
// Empty destructor; no explicit teardown is performed here.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
 319 
// Sets up per-region TAMS for the upcoming traversal cycle: regions in the
// traversal set get TAMS captured at current top (objects allocated beyond it
// are implicitly marked, see file header comment); all other regions get
// TAMS reset so everything in them is considered live.
void ShenandoahTraversalGC::prepare_regions() {
  size_t num_regions = _heap->num_regions();
  ShenandoahMarkingContext* const ctx = _heap->marking_context();
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = _heap->get_region(i);
    if (_heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        // Traversal region: capture TAMS and start with zero live data.
        ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        ctx->reset_top_at_mark_start(region);
      }
    } else {
      // FreeSet may contain uncommitted empty regions, once they are recommitted,
      // their TAMS may have old values, so reset them here.
      ctx->reset_top_at_mark_start(region);
    }
  }
}
 341 
// Prepares the heap for a traversal cycle: makes the heap parsable, chooses
// the collection set, sets up region TAMS, and rebuilds the free set.
// Expected to run under the heap lock at a safepoint (see caller).
void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  // Traversal starts from a clean, incomplete marking context.
  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
}
 372 
// Init-traversal pause: verifies (optionally), prepares the heap, enables
// reference discovery, and runs the parallel init-traversal root scan task.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    // Derived pointers must be invalidated around root updates.
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      // Iterator over code roots that point into the collection set; used to
      // pre-evacuate them when class unloading is on (see task).
      ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator();

      ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}
 428 
// Per-worker traversal driver. Selects the concrete closure type based on
// three orthogonal flags (degenerated GC, class unloading, string dedup) and
// dispatches to the templated main_loop_work. Flushes the worker's liveness
// cache when done.
//
// w:         worker id
// t:         termination protocol shared by all workers
// sts_yield: whether the loop may yield to the suspendible thread set
//            (true for concurrent phase, false at pauses)
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      // Degenerated GC: same matrix of options, with the Degen closure variants.
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  // Publish this worker's accumulated liveness data back to the heap.
  _heap->flush_liveness_cache(w);
}
 481 
// Core traversal work loop (templated on the traversal closure type):
// 1) drain any unclaimed outstanding queues in strides,
// 2) then loop: drain SATB completed buffers, pop/steal tasks, and offer
//    termination when no work was found in a full stride.
// Returns early when GC is cancelled.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Stride bounds how many tasks are processed between cancellation checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        // This claimed queue is drained; move on to the next one.
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Drain completed SATB buffers before queue work, so newly enqueued
    // oops become visible as tasks.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer own queue; fall back to stealing from other workers.
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      // Leave the evac-OOM scope and (when allowed) the suspendible set while
      // blocked in termination, so we do not hold up safepoints.
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}
 547 
 548 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
 549   if (_heap->cancelled_gc()) {
 550     return true;
 551   }
 552   return false;
 553 }
 554 
// Concurrent traversal phase: runs the parallel queue-draining task, then
// optionally precleans weak references.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  // Fresh CLD claims for this phase's CLD iteration.
  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  // Preclean only when still not cancelled, precleaning is enabled, and
  // reference processing is on for this cycle.
  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}
 573 
// Final-traversal pause: finishes traversal work, processes weak references,
// fixes up roots, unloads classes, and then trashes immediately-reclaimable
// regions and rebuilds the free set. Each stage is skipped once GC has been
// cancelled.
void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    fixup_roots();
    if (_heap->unload_classes()) {
      _heap->unload_classes_and_cleanup_tables(false);
    }
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // Regions with allocations past TAMS are alloc-regions and must not be
        // trashed (their new objects are implicitly live; see header comment).
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        // Candidates: traversed, no live data, and nothing allocated during the cycle.
        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + ShenandoahBrooksPointer::word_size();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Also trash all continuation regions of this humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}
 673 
 674 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 675 private:
 676   template <class T>
 677   inline void do_oop_work(T* p) {
 678     T o = RawAccess<>::oop_load(p);
 679     if (!CompressedOops::is_null(o)) {
 680       oop obj = CompressedOops::decode_not_null(o);
 681       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 682       if (!oopDesc::equals_raw(obj, forw)) {
 683         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 684       }
 685     }
 686   }
 687 
 688 public:
 689   inline void do_oop(oop* p) { do_oop_work(p); }
 690   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 691 };
 692 
 693 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 694 private:
 695   ShenandoahRootProcessor* _rp;
 696 
 697 public:
 698   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
 699     AbstractGangTask("Shenandoah traversal fix roots"),
 700     _rp(rp) {
 701     assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
 702   }
 703 
 704   void work(uint worker_id) {
 705     ShenandoahParallelWorkerSession worker_session(worker_id);
 706     ShenandoahTraversalFixRootsClosure cl;
 707     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 708     CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
 709     _rp->update_all_roots<ShenandoahForwardedIsAliveClosure>(&cl, &cldCl, &blobsCl, NULL, worker_id);
 710   }
 711 };
 712 
// Runs the parallel root-fixup task, bracketing it with derived-pointer
// table maintenance for C2/JVMCI compiled frames.
void ShenandoahTraversalGC::fixup_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
 724 
// Clears all traversal task queues, discarding any remaining tasks.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
 728 
// Accessor for the traversal task queue set.
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 732 
 733 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 734 private:
 735   ShenandoahHeap* const _heap;
 736 public:
 737   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 738   virtual bool should_return() { return _heap->cancelled_gc(); }
 739 };
 740 
// "Complete GC" closure used during weak-reference precleaning: runs the
// single-threaded traversal main loop (worker 0) to drain queues.
class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    // Single-threaded: terminator for exactly one worker.
    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
    shenandoah_assert_rp_isalive_installed();
    traversal_gc->main_loop((uint) 0, &terminator, true);
  }
};
 752 
// Keep-alive closure for reference processing: routes each oop through the
// traversal process_oop path (no string dedup, non-degenerated variant).
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 774 
// Degenerated-GC variant of the keep-alive closure: same structure, but
// instantiates process_oop with degen = true.
class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 796 
// Single-threaded keep-alive closure (non-degenerated path): like
// ShenandoahTraversalKeepAliveUpdateClosure, but additionally enters a
// ShenandoahEvacOOMScope around each slot — presumably because the serial
// reference-processing caller does not provide one (NOTE(review): confirm).
class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    // Per-slot evac-OOM protection.
    ShenandoahEvacOOMScope evac_scope;
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 819 
// Single-threaded keep-alive closure for the degenerated path: enters a
// per-slot ShenandoahEvacOOMScope and instantiates process_oop with
// degen = true.
class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    // Per-slot evac-OOM protection.
    ShenandoahEvacOOMScope evac_scope;
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 842 
// Gang task that performs reference precleaning. Precleaning in the upstream
// ReferenceProcessor is single-threaded, so exactly one worker (id 0) is
// expected to run it.
class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);
    // Join the suspendible thread set so safepoints can proceed while this
    // task runs concurrently.
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    // Evac-OOM scope covers the whole precleaning pass.
    ShenandoahEvacOOMScope oom_evac_scope;

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);

    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahTraversalCancelledGCYieldClosure yield;
    ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
    ResourceMark rm;
    // Last argument NULL: no phase-times instrumentation for precleaning.
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};
 872 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Scoped mutator: turn off MT discovery while the single precleaning worker
  // runs; restored automatically when this method returns.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Scoped mutator: install the forwarding-aware is-alive closure for the
  // duration of precleaning.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues must be drained unless the cycle was cancelled mid-preclean.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 913 
 914 // Weak Reference Closures
// Complete-GC closure for the parallel reference-processing path: drains the
// traversal marking queues by running this worker's main loop at a safepoint.
class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;    // re-arm terminator after draining so RP may invoke us again

public:
  ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    shenandoah_assert_rp_isalive_installed();

    traversal_gc->main_loop(_worker_id, _terminator, false);

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};
 942 
// Single-threaded variant of the drain closure: identical to the parallel one
// except it enters a ShenandoahEvacOOMScope around the main loop, as the
// serial reference-processing caller does not provide one.
class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;    // re-arm terminator after draining so RP may invoke us again

public:
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
          _worker_id(worker_id),
          _terminator(t),
          _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    shenandoah_assert_rp_isalive_installed();

    // Evac-OOM protection for the duration of the drain.
    ShenandoahEvacOOMScope evac_scope;
    traversal_gc->main_loop(_worker_id, _terminator, false);

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};
 971 
 972 void ShenandoahTraversalGC::weak_refs_work() {
 973   assert(_heap->process_references(), "sanity");
 974 
 975   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
 976 
 977   ShenandoahGCPhase phase(phase_root);
 978 
 979   ReferenceProcessor* rp = _heap->ref_processor();
 980 
 981   // NOTE: We cannot shortcut on has_discovered_references() here, because
 982   // we will miss marking JNI Weak refs then, see implementation in
 983   // ReferenceProcessor::process_discovered_references.
 984   weak_refs_work_doit();
 985 
 986   rp->verify_no_references_recorded();
 987   assert(!rp->discovery_enabled(), "Post condition");
 988 
 989 }
 990 
// Gang-task adapter that runs a ReferenceProcessor ProcessTask on the worker
// threads, wiring in Shenandoah's is-alive / keep-alive / complete-gc closures.
class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                                      ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    // Evac-OOM scope covers the whole per-worker reference-processing step.
    ShenandoahEvacOOMScope oom_evac_scope;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);

    ShenandoahForwardedIsAliveClosure is_alive;
    // Pick the keep-alive flavor matching the current GC mode.
    if (!heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};
1020 
// Executor handed to the ReferenceProcessor so it can run its parallel
// processing tasks on Shenandoah's worker gang.
class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    // Scope the worker count to what RP decided ergonomically; no check since
    // the count may legitimately differ from the gang's current setting.
    ShenandoahPushWorkerQueuesScope scope(_workers,
                                          traversal_gc->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    traversal_gc->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
    ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};
1045 
1046 void ShenandoahTraversalGC::weak_refs_work_doit() {
1047   ReferenceProcessor* rp = _heap->ref_processor();
1048 
1049   ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
1050 
1051   shenandoah_assert_rp_isalive_not_installed();
1052   ShenandoahForwardedIsAliveClosure is_alive;
1053   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
1054 
1055   WorkGang* workers = _heap->workers();
1056   uint nworkers = workers->active_workers();
1057 
1058   rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
1059   rp->set_active_mt_degree(nworkers);
1060 
1061   assert(task_queues()->is_empty(), "Should be empty");
1062 
1063   // complete_gc and keep_alive closures instantiated here are only needed for
1064   // single-threaded path in RP. They share the queue 0 for tracking work, which
1065   // simplifies implementation. Since RP may decide to call complete_gc several
1066   // times, we need to be able to reuse the terminator.
1067   uint serial_worker_id = 0;
1068   ShenandoahTaskTerminator terminator(1, task_queues());
1069   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
1070   ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
1071 
1072   ShenandoahTraversalRefProcTaskExecutor executor(workers);
1073 
1074   ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
1075   if (!_heap->is_degenerated_gc_in_progress()) {
1076     ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
1077     rp->process_discovered_references(&is_alive, &keep_alive,
1078                                       &complete_gc, &executor,
1079                                       &pt);
1080   } else {
1081     ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
1082     rp->process_discovered_references(&is_alive, &keep_alive,
1083                                       &complete_gc, &executor,
1084                                       &pt);
1085   }
1086 
1087   pt.print_all_references();
1088   assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
1089 }