1 /*
   2  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shared/weakProcessor.inline.hpp"
  32 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  33 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  34 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  38 #include "gc/shenandoah/shenandoahForwarding.hpp"
  39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  40 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  44 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  45 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  46 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  47 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  48 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  49 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  50 #include "gc/shenandoah/shenandoahUtils.hpp"
  51 #include "gc/shenandoah/shenandoahVerifier.hpp"
  52 
  53 #include "memory/iterator.hpp"
  54 #include "memory/metaspace.hpp"
  55 #include "memory/resourceArea.hpp"
  56 
  57 /**
  58  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  59  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  60  * is incremental-update-based.
  61  *
  62  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  63  * several reasons:
  64  * - We will not reclaim them in this cycle anyway, because they are not in the
  65  *   cset
  66  * - It makes up for the bulk of work during final-pause
  67  * - It also shortens the concurrent cycle because we don't need to
  68  *   pointlessly traverse through newly allocated objects.
  69  * - As a nice side-effect, it solves the I-U termination problem (mutators
  70  *   cannot outrun the GC by allocating like crazy)
  71  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  73  *   achieves the same, but without extra barriers. I think the effect of
  74  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  75  *   particular, we will not see the head of a completely new long linked list
  76  *   in final-pause and end up traversing huge chunks of the heap there.
  77  * - We don't need to see/update the fields of new objects either, because they
  78  *   are either still null, or anything that's been stored into them has been
  79  *   evacuated+enqueued before (and will thus be treated later).
  80  *
  81  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  83  *
  84  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  86  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  87  *   them for cset. This means that we need to protect such regions from
 *   getting accidentally trashed at the end of traversal cycle. This is why I
  89  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  90  *   code.
  91  * - We *need* to traverse through evacuated objects. Those objects are
  92  *   pre-existing, and any references in them point to interesting objects that
  93  *   we need to see. We also want to count them as live, because we just
  94  *   determined that they are alive :-) I achieve this by upping TAMS
  95  *   concurrently for every gclab/gc-shared alloc before publishing the
  96  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  98  */
  99 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
 100 private:
 101   ShenandoahObjToScanQueue* _queue;
 102   ShenandoahTraversalGC* _traversal_gc;
 103   ShenandoahHeap* const _heap;
 104 
 105 public:
 106   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 107     _queue(q),
 108     _heap(ShenandoahHeap::heap())
 109  { }
 110 
 111   void do_buffer(void** buffer, size_t size) {
 112     for (size_t i = 0; i < size; ++i) {
 113       oop* p = (oop*) &buffer[i];
 114       oop obj = RawAccess<>::oop_load(p);
 115       shenandoah_assert_not_forwarded(p, obj);
 116       if (_heap->marking_context()->mark(obj)) {
 117         _queue->push(ShenandoahMarkTask(obj));
 118       }
 119     }
 120   }
 121 };
 122 
 123 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 124 private:
 125   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 126 
 127 public:
 128   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 129     _satb_cl(satb_cl) {}
 130 
 131   void do_thread(Thread* thread) {
 132     ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
 133   }
 134 };
 135 
 136 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 137 // and remark them later during final-traversal.
 138 class ShenandoahMarkCLDClosure : public CLDClosure {
 139 private:
 140   OopClosure* _cl;
 141 public:
 142   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 143   void do_cld(ClassLoaderData* cld) {
 144     cld->oops_do(_cl, true, true);
 145   }
 146 };
 147 
 148 // Like CLDToOopClosure, but only process modified CLDs
 149 class ShenandoahRemarkCLDClosure : public CLDClosure {
 150 private:
 151   OopClosure* _cl;
 152 public:
 153   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 154   void do_cld(ClassLoaderData* cld) {
 155     if (cld->has_modified_oops()) {
 156       cld->oops_do(_cl, true, true);
 157     }
 158   }
 159 };
 160 
// Worker task for the init-traversal pause: scans GC roots and seeds the
// traversal queues with the discovered objects.
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahHeap* _heap;
  ShenandoahCsetCodeRootsIterator* _cset_coderoots;  // code roots that point into the cset
public:
  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()),
    _cset_coderoots(cset_coderoots) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    // Root scanning may evacuate objects; guard against OOM-during-evac.
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      if (unload_classes) {
        // With class unloading, only strong roots are scanned here; weakly
        // reachable code roots will be pruned during unloading.
        _rp->process_strong_roots(&roots_cl, &cld_cl, NULL, NULL, worker_id);
        // Need to pre-evac code roots here. Otherwise we might see from-space constants.
        ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
        ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        _cset_coderoots->possibly_parallel_blobs_do(&code_cl);
      } else {
        _rp->process_all_roots(&roots_cl, &cld_cl, &code_cl, NULL, worker_id);
      }
      if (ShenandoahStringDedup::is_enabled()) {
        // Dedup tables are scanned as strong roots here.
        AlwaysTrueClosure is_alive;
        ShenandoahStringDedup::parallel_oops_do(&is_alive, &roots_cl, worker_id);
      }
    }
  }
};
 208 
// Worker task for the concurrent traversal phase: drains the traversal
// queues while yielding to safepoints.
class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    // Join the suspendible set so this worker can be paused at safepoints.
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    // Drain all outstanding work in queues.
    traversal_gc->main_loop(worker_id, _terminator, true);
  }
};
 229 
// Worker task for the final-traversal pause: drains remaining SATB buffers,
// re-scans GC roots, and finishes all queued traversal work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      // Thread closure also drains per-thread SATB buffers (see Step 0 note).
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Only re-visit CLDs that were modified since the initial scan.
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, &remark_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    } else {
      // Degenerated cycle: same structure, but with closures that tolerate
      // from-space references left over from the interrupted concurrent phase.
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, &remark_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 2: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
 305 
 306 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 307   _heap(heap),
 308   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 309   _traversal_set(ShenandoahHeapRegionSet()) {
 310 
 311   uint num_queues = heap->max_workers();
 312   for (uint i = 0; i < num_queues; ++i) {
 313     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 314     task_queue->initialize();
 315     _task_queues->register_queue(i, task_queue);
 316   }
 317 }
 318 
// Nothing to tear down: queues live for the lifetime of the heap.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
 321 
// Sets TAMS (top-at-mark-start) for every region ahead of the cycle:
// traversal-set regions capture their current top as TAMS so that anything
// allocated beyond it is implicitly marked; all other regions get TAMS
// reset, making them wholly "above TAMS" and thus implicitly live.
void ShenandoahTraversalGC::prepare_regions() {
  size_t num_regions = _heap->num_regions();
  ShenandoahMarkingContext* const ctx = _heap->marking_context();
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = _heap->get_region(i);
    if (_heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        // Region will be traversed: count liveness from scratch.
        ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        ctx->reset_top_at_mark_start(region);
      }
    } else {
      // FreeSet may contain uncommitted empty regions, once they are recommitted,
      // their TAMS may have old values, so reset them here.
      ctx->reset_top_at_mark_start(region);
    }
  }
}
 343 
// Pre-cycle preparation (called under the heap lock during the init pause):
// makes the heap parsable, chooses the collection set, sets up region TAMS,
// and rebuilds the free set.
void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  {
    // Retire TLABs/GCLABs so region tops are exact.
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
}
 374 
// Init-traversal safepoint operation: verifies the heap, prepares the cycle,
// enables reference discovery, and scans GC roots to seed the task queues.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    // Heap lock is needed because prepare() mutates the free set and cset.
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    // Derived pointers must be saved around any phase that moves objects.
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator();

      ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}
 430 
// Per-worker traversal driver: selects the right closure flavor for the
// current mode (normal vs degenerated, class unloading on/off, string dedup
// on/off) and runs the templated work loop with it.
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    // Dispatch on cycle mode; the closure type is a template parameter so
    // the hot loop is specialized per mode.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      // Degenerated variants tolerate from-space references left by the
      // interrupted concurrent phase.
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  // Publish this worker's accumulated liveness data back to the regions.
  _heap->flush_liveness_cache(w);
}
 483 
// The actual traversal work loop: first drains any leftover queues, then
// alternates between draining SATB buffers, processing/stealing tasks, and
// offering termination when no work is found.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Number of tasks to process between cancellation checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        // This queue is drained; move on to the next unclaimed one.
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Pick up oops enqueued by mutator barriers since the last iteration.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer own queue; fall back to stealing from other workers.
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      // Leave the evac-OOM scope and suspendible set before blocking in the
      // terminator, so we neither hold up OOM handling nor safepoints.
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}
 549 
 550 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
 551   if (_heap->cancelled_gc()) {
 552     return true;
 553   }
 554   return false;
 555 }
 556 
// Concurrent traversal phase: drains the queues seeded at init-traversal,
// then optionally precleans discovered weak references.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  // CLD claims from the init pause must be reset before re-claiming here.
  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}
 575 
// Final-traversal safepoint operation: finishes marking/evacuation, processes
// weak references, fixes up roots, unloads classes, then trashes regions that
// are provably all-garbage and verifies the result.
void ShenandoahTraversalGC::final_traversal_collection() {
  // Retire TLABs so region tops are exact for the liveness checks below.
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    fixup_roots();
    if (_heap->unload_classes()) {
      _heap->unload_classes_and_cleanup_tables(false);
    }
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // Regions with allocations past TAMS hold implicitly-live objects
        // (see class comment) and must not be trashed this cycle.
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + ShenandoahForwarding::word_size();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Trash all continuation regions of this humongous object too.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}
 675 
 676 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 677 private:
 678   template <class T>
 679   inline void do_oop_work(T* p) {
 680     T o = RawAccess<>::oop_load(p);
 681     if (!CompressedOops::is_null(o)) {
 682       oop obj = CompressedOops::decode_not_null(o);
 683       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 684       if (!oopDesc::equals_raw(obj, forw)) {
 685         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 686       }
 687     }
 688   }
 689 
 690 public:
 691   inline void do_oop(oop* p) { do_oop_work(p); }
 692   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 693 };
 694 
// Worker task that walks all roots and repoints any stale references at
// their forwarded copies (used after traversal evacuation completes).
class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;

public:
  ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah traversal fix roots"),
    _rp(rp) {
    // Fixing roots only makes sense while forwarded objects still exist.
    assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahTraversalFixRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
    CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
    _rp->update_all_roots<ShenandoahForwardedIsAliveClosure>(&cl, &cldCl, &blobsCl, NULL, worker_id);
  }
};
 714 
// Repoints all GC roots at forwarded copies, in parallel across the workers.
void ShenandoahTraversalGC::fixup_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  // Derived pointers must be saved/restored around root updates.
  DerivedPointerTable::clear();
#endif
  ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
 726 
// Discards any residual tasks; queues must be logically empty between cycles.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
 730 
// Accessor for the traversal task queue set (one queue per worker).
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 734 
 735 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 736 private:
 737   ShenandoahHeap* const _heap;
 738 public:
 739   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 740   virtual bool should_return() { return _heap->cancelled_gc(); }
 741 };
 742 
// "Complete GC" hook for reference precleaning: drains the traversal queues
// single-threadedly so precleaned referents get fully traversed.
class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    // Single-threaded drain: terminator over worker 0's queue only.
    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
    shenandoah_assert_rp_isalive_installed();
    traversal_gc->main_loop((uint) 0, &terminator, true);
  }
};
 754 
// Keep-alive closure for reference processing in a normal (non-degenerated)
// cycle: routes each slot through the traversal process_oop path, which
// marks/evacuates/updates and enqueues as needed.
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 776 
 777 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 778 private:
 779   ShenandoahObjToScanQueue* _queue;
 780   Thread* _thread;
 781   ShenandoahTraversalGC* _traversal_gc;
 782   ShenandoahMarkingContext* const _mark_context;
 783 
 784   template <class T>
 785   inline void do_oop_work(T* p) {
 786     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
 787   }
 788 
 789 public:
 790   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 791           _queue(q), _thread(Thread::current()),
 792           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 793           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 794 
 795   void do_oop(narrowOop* p) { do_oop_work(p); }
 796   void do_oop(oop* p)       { do_oop_work(p); }
 797 };
 798 
 799 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
 800 private:
 801   ShenandoahObjToScanQueue* _queue;
 802   Thread* _thread;
 803   ShenandoahTraversalGC* _traversal_gc;
 804   ShenandoahMarkingContext* const _mark_context;
 805 
 806   template <class T>
 807   inline void do_oop_work(T* p) {
 808     ShenandoahEvacOOMScope evac_scope;
 809     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
 810   }
 811 
 812 public:
 813   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 814           _queue(q), _thread(Thread::current()),
 815           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 816           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 817 
 818   void do_oop(narrowOop* p) { do_oop_work(p); }
 819   void do_oop(oop* p)       { do_oop_work(p); }
 820 };
 821 
 822 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
 823 private:
 824   ShenandoahObjToScanQueue* _queue;
 825   Thread* _thread;
 826   ShenandoahTraversalGC* _traversal_gc;
 827   ShenandoahMarkingContext* const _mark_context;
 828 
 829   template <class T>
 830   inline void do_oop_work(T* p) {
 831     ShenandoahEvacOOMScope evac_scope;
 832     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
 833   }
 834 
 835 public:
 836   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 837           _queue(q), _thread(Thread::current()),
 838           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 839           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 840 
 841   void do_oop(narrowOop* p) { do_oop_work(p); }
 842   void do_oop(oop* p)       { do_oop_work(p); }
 843 };
 844 
 845 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
 846 private:
 847   ReferenceProcessor* _rp;
 848 
 849 public:
 850   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
 851           AbstractGangTask("Precleaning task"),
 852           _rp(rp) {}
 853 
 854   void work(uint worker_id) {
 855     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
 856     ShenandoahParallelWorkerSession worker_session(worker_id);
 857     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 858     ShenandoahEvacOOMScope oom_evac_scope;
 859 
 860     ShenandoahHeap* sh = ShenandoahHeap::heap();
 861 
 862     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
 863 
 864     ShenandoahForwardedIsAliveClosure is_alive;
 865     ShenandoahTraversalCancelledGCYieldClosure yield;
 866     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 867     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
 868     ResourceMark rm;
 869     _rp->preclean_discovered_references(&is_alive, &keep_alive,
 870                                         &complete_gc, &yield,
 871                                         NULL);
 872   }
 873 };
 874 
// Concurrent precleaning of discovered weak references. Runs in a single
// worker thread near the end of concurrent traversal, before the final STW
// reference-processing pause.
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning below is single-threaded; turn MT discovery off for its duration.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Install the traversal-aware is-alive closure into RP for this scope.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues may legitimately be non-empty only if the cycle was cancelled mid-preclean.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 915 
 916 // Weak Reference Closures
 917 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 918   uint _worker_id;
 919   ShenandoahTaskTerminator* _terminator;
 920   bool _reset_terminator;
 921 
 922 public:
 923   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 924     _worker_id(worker_id),
 925     _terminator(t),
 926     _reset_terminator(reset_terminator) {
 927   }
 928 
 929   void do_void() {
 930     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 931 
 932     ShenandoahHeap* sh = ShenandoahHeap::heap();
 933     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 934     assert(sh->process_references(), "why else would we be here?");
 935     shenandoah_assert_rp_isalive_installed();
 936 
 937     traversal_gc->main_loop(_worker_id, _terminator, false);
 938 
 939     if (_reset_terminator) {
 940       _terminator->reset_for_reuse();
 941     }
 942   }
 943 };
 944 
 945 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
 946   uint _worker_id;
 947   ShenandoahTaskTerminator* _terminator;
 948   bool _reset_terminator;
 949 
 950 public:
 951   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 952           _worker_id(worker_id),
 953           _terminator(t),
 954           _reset_terminator(reset_terminator) {
 955   }
 956 
 957   void do_void() {
 958     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 959 
 960     ShenandoahHeap* sh = ShenandoahHeap::heap();
 961     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 962     assert(sh->process_references(), "why else would we be here?");
 963     shenandoah_assert_rp_isalive_installed();
 964 
 965     ShenandoahEvacOOMScope evac_scope;
 966     traversal_gc->main_loop(_worker_id, _terminator, false);
 967 
 968     if (_reset_terminator) {
 969       _terminator->reset_for_reuse();
 970     }
 971   }
 972 };
 973 
 974 void ShenandoahTraversalGC::weak_refs_work() {
 975   assert(_heap->process_references(), "sanity");
 976 
 977   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
 978 
 979   ShenandoahGCPhase phase(phase_root);
 980 
 981   ReferenceProcessor* rp = _heap->ref_processor();
 982 
 983   // NOTE: We cannot shortcut on has_discovered_references() here, because
 984   // we will miss marking JNI Weak refs then, see implementation in
 985   // ReferenceProcessor::process_discovered_references.
 986   weak_refs_work_doit();
 987 
 988   rp->verify_no_references_recorded();
 989   assert(!rp->discovery_enabled(), "Post condition");
 990 
 991 }
 992 
 993 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
 994 private:
 995   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
 996   ShenandoahTaskTerminator* _terminator;
 997 
 998 public:
 999   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1000                                       ShenandoahTaskTerminator* t) :
1001     AbstractGangTask("Process reference objects in parallel"),
1002     _proc_task(proc_task),
1003     _terminator(t) {
1004   }
1005 
1006   void work(uint worker_id) {
1007     ShenandoahEvacOOMScope oom_evac_scope;
1008     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1009     ShenandoahHeap* heap = ShenandoahHeap::heap();
1010     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1011 
1012     ShenandoahForwardedIsAliveClosure is_alive;
1013     if (!heap->is_degenerated_gc_in_progress()) {
1014       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1015       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1016     } else {
1017       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1018       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1019     }
1020   }
1021 };
1022 
1023 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1024 private:
1025   WorkGang* _workers;
1026 
1027 public:
1028   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
1029 
1030   // Executes a task using worker threads.
1031   void execute(ProcessTask& task, uint ergo_workers) {
1032     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1033 
1034     ShenandoahHeap* heap = ShenandoahHeap::heap();
1035     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1036     ShenandoahPushWorkerQueuesScope scope(_workers,
1037                                           traversal_gc->task_queues(),
1038                                           ergo_workers,
1039                                           /* do_check = */ false);
1040     uint nworkers = _workers->active_workers();
1041     traversal_gc->task_queues()->reserve(nworkers);
1042     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1043     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1044     _workers->run_task(&proc_task_proxy);
1045   }
1046 };
1047 
1048 void ShenandoahTraversalGC::weak_refs_work_doit() {
1049   ReferenceProcessor* rp = _heap->ref_processor();
1050 
1051   ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
1052 
1053   shenandoah_assert_rp_isalive_not_installed();
1054   ShenandoahForwardedIsAliveClosure is_alive;
1055   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
1056 
1057   WorkGang* workers = _heap->workers();
1058   uint nworkers = workers->active_workers();
1059 
1060   rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
1061   rp->set_active_mt_degree(nworkers);
1062 
1063   assert(task_queues()->is_empty(), "Should be empty");
1064 
1065   // complete_gc and keep_alive closures instantiated here are only needed for
1066   // single-threaded path in RP. They share the queue 0 for tracking work, which
1067   // simplifies implementation. Since RP may decide to call complete_gc several
1068   // times, we need to be able to reuse the terminator.
1069   uint serial_worker_id = 0;
1070   ShenandoahTaskTerminator terminator(1, task_queues());
1071   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
1072   ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
1073 
1074   ShenandoahTraversalRefProcTaskExecutor executor(workers);
1075 
1076   ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
1077   if (!_heap->is_degenerated_gc_in_progress()) {
1078     ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
1079     rp->process_discovered_references(&is_alive, &keep_alive,
1080                                       &complete_gc, &executor,
1081                                       &pt);
1082   } else {
1083     ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
1084     rp->process_discovered_references(&is_alive, &keep_alive,
1085                                       &complete_gc, &executor,
1086                                       &pt);
1087   }
1088 
1089   pt.print_all_references();
1090   assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
1091 }