1 /*
   2  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shared/weakProcessor.inline.hpp"
  32 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  33 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  34 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  38 #include "gc/shenandoah/shenandoahForwarding.hpp"
  39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  40 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  44 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  45 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  46 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  47 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  48 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  49 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  50 #include "gc/shenandoah/shenandoahUtils.hpp"
  51 #include "gc/shenandoah/shenandoahVerifier.hpp"
  52 
  53 #include "memory/iterator.hpp"
  54 #include "memory/metaspace.hpp"
  55 #include "memory/resourceArea.hpp"
  56 #include "memory/universe.hpp"
  57 
  58 /**
  59  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  60  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  61  * is incremental-update-based.
  62  *
  63  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  64  * several reasons:
  65  * - We will not reclaim them in this cycle anyway, because they are not in the
  66  *   cset
  67  * - It makes up for the bulk of work during final-pause
  68  * - It also shortens the concurrent cycle because we don't need to
  69  *   pointlessly traverse through newly allocated objects.
  70  * - As a nice side-effect, it solves the I-U termination problem (mutators
  71  *   cannot outrun the GC by allocating like crazy)
  72  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  74  *   achieves the same, but without extra barriers. I think the effect of
  75  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  76  *   particular, we will not see the head of a completely new long linked list
  77  *   in final-pause and end up traversing huge chunks of the heap there.
  78  * - We don't need to see/update the fields of new objects either, because they
  79  *   are either still null, or anything that's been stored into them has been
  80  *   evacuated+enqueued before (and will thus be treated later).
  81  *
  82  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  84  *
  85  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  87  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  88  *   them for cset. This means that we need to protect such regions from
 *   getting accidentally trashed at the end of traversal cycle. This is why I
  90  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  91  *   code.
  92  * - We *need* to traverse through evacuated objects. Those objects are
  93  *   pre-existing, and any references in them point to interesting objects that
  94  *   we need to see. We also want to count them as live, because we just
  95  *   determined that they are alive :-) I achieve this by upping TAMS
  96  *   concurrently for every gclab/gc-shared alloc before publishing the
  97  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  99  */
 100 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
 101 private:
 102   ShenandoahObjToScanQueue* _queue;
 103   ShenandoahTraversalGC* _traversal_gc;
 104   ShenandoahHeap* const _heap;
 105 
 106 public:
 107   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 108     _queue(q),
 109     _heap(ShenandoahHeap::heap())
 110  { }
 111 
 112   void do_buffer(void** buffer, size_t size) {
 113     for (size_t i = 0; i < size; ++i) {
 114       oop* p = (oop*) &buffer[i];
 115       oop obj = RawAccess<>::oop_load(p);
 116       shenandoah_assert_not_forwarded(p, obj);
 117       if (_heap->marking_context()->mark(obj)) {
 118         _queue->push(ShenandoahMarkTask(obj));
 119       }
 120     }
 121   }
 122 };
 123 
 124 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 125 private:
 126   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 127 
 128 public:
 129   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 130     _satb_cl(satb_cl) {}
 131 
 132   void do_thread(Thread* thread) {
 133     ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
 134   }
 135 };
 136 
 137 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 138 // and remark them later during final-traversal.
 139 class ShenandoahMarkCLDClosure : public CLDClosure {
 140 private:
 141   OopClosure* _cl;
 142 public:
 143   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 144   void do_cld(ClassLoaderData* cld) {
 145     cld->oops_do(_cl, true, true);
 146   }
 147 };
 148 
 149 // Like CLDToOopClosure, but only process modified CLDs
 150 class ShenandoahRemarkCLDClosure : public CLDClosure {
 151 private:
 152   OopClosure* _cl;
 153 public:
 154   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 155   void do_cld(ClassLoaderData* cld) {
 156     if (cld->has_modified_oops()) {
 157       cld->oops_do(_cl, true, true);
 158     }
 159   }
 160 };
 161 
// Worker task for the init-traversal pause: scans GC roots and seeds the
// per-worker traversal task queues with the objects found there.
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahHeap* _heap;
  // Iterator over code roots that point into the collection set; used to
  // pre-evacuate those nmethods when classes are being unloaded.
  ShenandoahCsetCodeRootsIterator* _cset_coderoots;
public:
  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()),
    _cset_coderoots(cset_coderoots) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    // Root scanning may trigger evacuation; guard against evac-OOM.
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    // Reference processor stays NULL when reference processing is disabled.
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      if (unload_classes) {
        // Strong roots only; dead classes/nmethods are culled later.
        _rp->process_strong_roots(&roots_cl, &cld_cl, NULL, NULL, worker_id);
        // Need to pre-evac code roots here. Otherwise we might see from-space constants.
        ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
        ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        _cset_coderoots->possibly_parallel_blobs_do(&code_cl);
      } else {
        // No class unloading: visit all roots, including all code roots.
        _rp->process_all_roots(&roots_cl, &cld_cl, &code_cl, NULL, worker_id);
      }
      if (ShenandoahStringDedup::is_enabled()) {
        // Dedup table oops are roots too; everything in it is treated alive here.
        AlwaysTrueClosure is_alive;
        ShenandoahStringDedup::parallel_oops_do(&is_alive, &roots_cl, worker_id);
      }
    }
  }
};
 209 
 210 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 211 private:
 212   ShenandoahTaskTerminator* _terminator;
 213   ShenandoahHeap* _heap;
 214 public:
 215   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
 216     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 217     _terminator(terminator),
 218     _heap(ShenandoahHeap::heap()) {}
 219 
 220   void work(uint worker_id) {
 221     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 222     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 223     ShenandoahEvacOOMScope oom_evac_scope;
 224     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 225 
 226     // Drain all outstanding work in queues.
 227     traversal_gc->main_loop(worker_id, _terminator, true);
 228   }
 229 };
 230 
// Worker task for the final-traversal pause: drains leftover SATB buffers,
// re-scans GC roots, and finishes all remaining queued traversal work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    // The degenerated path uses the *Degen closure flavor; otherwise the two
    // branches are structurally identical.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      // Thread closure drains each thread's SATB buffer during the root scan.
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Only remark CLDs that were modified since concurrent traversal.
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, &remark_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    } else {
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, &remark_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 2: Finally drain all outstanding work in queues.
      // 'false': we are inside a pause, no yielding to the suspendible set.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
 306 
 307 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 308   _heap(heap),
 309   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 310   _traversal_set(ShenandoahHeapRegionSet()) {
 311 
 312   uint num_queues = heap->max_workers();
 313   for (uint i = 0; i < num_queues; ++i) {
 314     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 315     task_queue->initialize();
 316     _task_queues->register_queue(i, task_queue);
 317   }
 318 }
 319 
// Nothing to tear down explicitly here; the task queues allocated in the
// constructor live as long as the heap does.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
 322 
// Sets up per-region TAMS for the upcoming traversal cycle: regions in the
// traversal set get a fresh TAMS (so allocations past it are implicitly
// marked), everything else gets TAMS reset so it reads as fully live.
void ShenandoahTraversalGC::prepare_regions() {
  size_t num_regions = _heap->num_regions();
  ShenandoahMarkingContext* const ctx = _heap->marking_context();
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = _heap->get_region(i);
    if (_heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        // Region will be traversed: snapshot TAMS at current top and start
        // with zero live data; traversal recomputes liveness.
        ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        ctx->reset_top_at_mark_start(region);
      }
    } else {
      // FreeSet may contain uncommitted empty regions, once they are recommitted,
      // their TAMS may have old values, so reset them here.
      ctx->reset_top_at_mark_start(region);
    }
  }
}
 344 
// Pre-cycle preparation (called under the heap lock at a safepoint): makes
// the heap parsable, chooses the collection set, primes per-region TAMS,
// and rebuilds the free set.
void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  // Traversal relies on starting with a clean, not-yet-complete mark bitmap.
  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
}
 375 
// Init-traversal safepoint operation: verifies the heap (optionally),
// prepares regions and the collection set, enables reference discovery,
// and performs the initial parallel root scan that seeds the task queues.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    // prepare() mutates the collection set and free set; hold the heap lock.
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    // Soft reference clearing policy follows the heap-wide decision.
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    // Derived pointers may be created while scanning/updating roots; bracket
    // the work with clear()/update_pointers().
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator();

      ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}
 431 
// Per-worker traversal driver. Picks the closure flavor matching the current
// mode (degenerated-or-not x class-unloading x string-dedup) and runs the
// templated work loop with it. 'w' is the worker id, 't' the terminator for
// this phase, and 'sts_yield' controls whether the loop may yield to the
// suspendible thread set (true concurrently, false inside a pause).
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    // The 2x2x2 dispatch below exists because the closure type is a template
    // parameter of main_loop_work; each combination must be spelled out.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  // Publish the per-worker liveness counts back to the regions.
  _heap->flush_liveness_cache(w);
}
 484 
// The actual traversal work loop, parameterized by closure type T.
// Phase 1 drains any orphaned queues (claimed round-robin); phase 2 works
// the worker's own queue, stealing from others, periodically draining SATB
// buffers, until the terminator confirms global completion. Cancellation is
// polled between strides so the loop can exit promptly.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Number of tasks to process between cancellation/SATB checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        // This claimed queue is drained; move on to the next one (NULL ends
        // the outer loop).
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  // Drain completed SATB buffers into our own queue as we go.
  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer local work; fall back to stealing from other workers.
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      // Leave the evac-OOM scope (and optionally the suspendible set) while
      // blocked in termination, so we do not stall other threads.
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      // Termination fails if new work appeared (e.g. via SATB); loop again.
      if (terminator->offer_termination(&tt)) return;
    }
  }
}
 550 
 551 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
 552   if (_heap->cancelled_gc()) {
 553     return true;
 554   }
 555   return false;
 556 }
 557 
// Concurrent traversal phase: clears CLD claim marks, then runs the worker
// gang until the queues are drained (or GC is cancelled). Optionally
// precleans weak references afterwards.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  {
    // Claim marks were set during the init root scan; reset them so the
    // final root scan can claim CLDs again.
    MutexLocker ml(ClassLoaderDataGraph_lock);
    ClassLoaderDataGraph::clear_claimed_marks();
  }

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}
 579 
// Final-traversal pause: finishes all traversal work, processes weak
// references, fixes up roots, unloads classes if requested, and then
// trashes fully-dead regions and rebuilds the free set. Each major step is
// skipped once the GC has been cancelled.
void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    // Point all roots at to-space copies before declaring marking complete.
    fixup_roots();
    if (_heap->unload_classes()) {
      _heap->unload_classes_and_cleanup_tables(false);
    }
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // 'not_allocated': nothing was allocated in this region since TAMS
        // was captured; see the alloc-region note in the file header.
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        // Only traversed regions with no live data and no fresh allocations
        // are immediate-trash candidates.
        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + ShenandoahForwarding::word_size();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Trash all continuation regions of this humongous object as well.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}
 679 
 680 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 681 private:
 682   template <class T>
 683   inline void do_oop_work(T* p) {
 684     T o = RawAccess<>::oop_load(p);
 685     if (!CompressedOops::is_null(o)) {
 686       oop obj = CompressedOops::decode_not_null(o);
 687       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 688       if (!oopDesc::equals_raw(obj, forw)) {
 689         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 690       }
 691     }
 692   }
 693 
 694 public:
 695   inline void do_oop(oop* p) { do_oop_work(p); }
 696   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 697 };
 698 
 699 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 700 private:
 701   ShenandoahRootProcessor* _rp;
 702 
 703 public:
 704   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
 705     AbstractGangTask("Shenandoah traversal fix roots"),
 706     _rp(rp) {
 707     assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
 708   }
 709 
 710   void work(uint worker_id) {
 711     ShenandoahParallelWorkerSession worker_session(worker_id);
 712     ShenandoahTraversalFixRootsClosure cl;
 713     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 714     CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
 715     _rp->update_all_roots<ShenandoahForwardedIsAliveClosure>(&cl, &cldCl, &blobsCl, NULL, worker_id);
 716   }
 717 };
 718 
// Runs the parallel root fixup task that updates root references to the
// forwarded copies; derived pointers are bracketed on C2/JVMCI builds.
void ShenandoahTraversalGC::fixup_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
 730 
// Drops any leftover entries from the task queues at end of cycle.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
 734 
// Accessor for the per-worker traversal task queue set.
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 738 
 739 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 740 private:
 741   ShenandoahHeap* const _heap;
 742 public:
 743   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 744   virtual bool should_return() { return _heap->cancelled_gc(); }
 745 };
 746 
 747 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 748 public:
 749   void do_void() {
 750     ShenandoahHeap* sh = ShenandoahHeap::heap();
 751     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 752     assert(sh->process_references(), "why else would we be here?");
 753     ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
 754     shenandoah_assert_rp_isalive_installed();
 755     traversal_gc->main_loop((uint) 0, &terminator, true);
 756   }
 757 };
 758 
 759 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 760 private:
 761   ShenandoahObjToScanQueue* _queue;
 762   Thread* _thread;
 763   ShenandoahTraversalGC* _traversal_gc;
 764   ShenandoahMarkingContext* const _mark_context;
 765 
 766   template <class T>
 767   inline void do_oop_work(T* p) {
 768     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
 769   }
 770 
 771 public:
 772   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 773     _queue(q), _thread(Thread::current()),
 774     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 775     _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 776 
 777   void do_oop(narrowOop* p) { do_oop_work(p); }
 778   void do_oop(oop* p)       { do_oop_work(p); }
 779 };
 780 
 781 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 782 private:
 783   ShenandoahObjToScanQueue* _queue;
 784   Thread* _thread;
 785   ShenandoahTraversalGC* _traversal_gc;
 786   ShenandoahMarkingContext* const _mark_context;
 787 
 788   template <class T>
 789   inline void do_oop_work(T* p) {
 790     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
 791   }
 792 
 793 public:
 794   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 795           _queue(q), _thread(Thread::current()),
 796           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 797           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 798 
 799   void do_oop(narrowOop* p) { do_oop_work(p); }
 800   void do_oop(oop* p)       { do_oop_work(p); }
 801 };
 802 
 803 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
 804 private:
 805   ShenandoahObjToScanQueue* _queue;
 806   Thread* _thread;
 807   ShenandoahTraversalGC* _traversal_gc;
 808   ShenandoahMarkingContext* const _mark_context;
 809 
 810   template <class T>
 811   inline void do_oop_work(T* p) {
 812     ShenandoahEvacOOMScope evac_scope;
 813     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
 814   }
 815 
 816 public:
 817   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 818           _queue(q), _thread(Thread::current()),
 819           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 820           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 821 
 822   void do_oop(narrowOop* p) { do_oop_work(p); }
 823   void do_oop(oop* p)       { do_oop_work(p); }
 824 };
 825 
 826 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
 827 private:
 828   ShenandoahObjToScanQueue* _queue;
 829   Thread* _thread;
 830   ShenandoahTraversalGC* _traversal_gc;
 831   ShenandoahMarkingContext* const _mark_context;
 832 
 833   template <class T>
 834   inline void do_oop_work(T* p) {
 835     ShenandoahEvacOOMScope evac_scope;
 836     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
 837   }
 838 
 839 public:
 840   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 841           _queue(q), _thread(Thread::current()),
 842           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 843           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 844 
 845   void do_oop(narrowOop* p) { do_oop_work(p); }
 846   void do_oop(oop* p)       { do_oop_work(p); }
 847 };
 848 
 849 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
 850 private:
 851   ReferenceProcessor* _rp;
 852 
 853 public:
 854   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
 855           AbstractGangTask("Precleaning task"),
 856           _rp(rp) {}
 857 
 858   void work(uint worker_id) {
 859     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
 860     ShenandoahParallelWorkerSession worker_session(worker_id);
 861     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 862     ShenandoahEvacOOMScope oom_evac_scope;
 863 
 864     ShenandoahHeap* sh = ShenandoahHeap::heap();
 865 
 866     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
 867 
 868     ShenandoahForwardedIsAliveClosure is_alive;
 869     ShenandoahTraversalCancelledGCYieldClosure yield;
 870     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 871     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
 872     ResourceMark rm;
 873     _rp->preclean_discovered_references(&is_alive, &keep_alive,
 874                                         &complete_gc, &yield,
 875                                         NULL);
 876   }
 877 };
 878 
// Concurrently precleans discovered weak references on a single worker
// thread, before the STW reference-processing pause.
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning below runs single-threaded; switch off MT discovery for its duration.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Install the forwarding-aware is-alive closure into RP for the scope of this method.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues should have been fully drained, unless the GC was cancelled midway.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 919 
 920 // Weak Reference Closures
 921 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 922   uint _worker_id;
 923   ShenandoahTaskTerminator* _terminator;
 924   bool _reset_terminator;
 925 
 926 public:
 927   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 928     _worker_id(worker_id),
 929     _terminator(t),
 930     _reset_terminator(reset_terminator) {
 931   }
 932 
 933   void do_void() {
 934     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 935 
 936     ShenandoahHeap* sh = ShenandoahHeap::heap();
 937     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 938     assert(sh->process_references(), "why else would we be here?");
 939     shenandoah_assert_rp_isalive_installed();
 940 
 941     traversal_gc->main_loop(_worker_id, _terminator, false);
 942 
 943     if (_reset_terminator) {
 944       _terminator->reset_for_reuse();
 945     }
 946   }
 947 };
 948 
 949 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
 950   uint _worker_id;
 951   ShenandoahTaskTerminator* _terminator;
 952   bool _reset_terminator;
 953 
 954 public:
 955   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 956           _worker_id(worker_id),
 957           _terminator(t),
 958           _reset_terminator(reset_terminator) {
 959   }
 960 
 961   void do_void() {
 962     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 963 
 964     ShenandoahHeap* sh = ShenandoahHeap::heap();
 965     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 966     assert(sh->process_references(), "why else would we be here?");
 967     shenandoah_assert_rp_isalive_installed();
 968 
 969     ShenandoahEvacOOMScope evac_scope;
 970     traversal_gc->main_loop(_worker_id, _terminator, false);
 971 
 972     if (_reset_terminator) {
 973       _terminator->reset_for_reuse();
 974     }
 975   }
 976 };
 977 
 978 void ShenandoahTraversalGC::weak_refs_work() {
 979   assert(_heap->process_references(), "sanity");
 980 
 981   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
 982 
 983   ShenandoahGCPhase phase(phase_root);
 984 
 985   ReferenceProcessor* rp = _heap->ref_processor();
 986 
 987   // NOTE: We cannot shortcut on has_discovered_references() here, because
 988   // we will miss marking JNI Weak refs then, see implementation in
 989   // ReferenceProcessor::process_discovered_references.
 990   weak_refs_work_doit();
 991 
 992   rp->verify_no_references_recorded();
 993   assert(!rp->discovery_enabled(), "Post condition");
 994 
 995 }
 996 
 997 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
 998 private:
 999   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1000   ShenandoahTaskTerminator* _terminator;
1001 
1002 public:
1003   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1004                                       ShenandoahTaskTerminator* t) :
1005     AbstractGangTask("Process reference objects in parallel"),
1006     _proc_task(proc_task),
1007     _terminator(t) {
1008   }
1009 
1010   void work(uint worker_id) {
1011     ShenandoahEvacOOMScope oom_evac_scope;
1012     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1013     ShenandoahHeap* heap = ShenandoahHeap::heap();
1014     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1015 
1016     ShenandoahForwardedIsAliveClosure is_alive;
1017     if (!heap->is_degenerated_gc_in_progress()) {
1018       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1019       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1020     } else {
1021       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1022       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1023     }
1024   }
1025 };
1026 
1027 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1028 private:
1029   WorkGang* _workers;
1030 
1031 public:
1032   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
1033 
1034   // Executes a task using worker threads.
1035   void execute(ProcessTask& task, uint ergo_workers) {
1036     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1037 
1038     ShenandoahHeap* heap = ShenandoahHeap::heap();
1039     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1040     ShenandoahPushWorkerQueuesScope scope(_workers,
1041                                           traversal_gc->task_queues(),
1042                                           ergo_workers,
1043                                           /* do_check = */ false);
1044     uint nworkers = _workers->active_workers();
1045     traversal_gc->task_queues()->reserve(nworkers);
1046     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1047     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1048     _workers->run_task(&proc_task_proxy);
1049   }
1050 };
1051 
1052 void ShenandoahTraversalGC::weak_refs_work_doit() {
1053   ReferenceProcessor* rp = _heap->ref_processor();
1054 
1055   ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
1056 
1057   shenandoah_assert_rp_isalive_not_installed();
1058   ShenandoahForwardedIsAliveClosure is_alive;
1059   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
1060 
1061   WorkGang* workers = _heap->workers();
1062   uint nworkers = workers->active_workers();
1063 
1064   rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
1065   rp->set_active_mt_degree(nworkers);
1066 
1067   assert(task_queues()->is_empty(), "Should be empty");
1068 
1069   // complete_gc and keep_alive closures instantiated here are only needed for
1070   // single-threaded path in RP. They share the queue 0 for tracking work, which
1071   // simplifies implementation. Since RP may decide to call complete_gc several
1072   // times, we need to be able to reuse the terminator.
1073   uint serial_worker_id = 0;
1074   ShenandoahTaskTerminator terminator(1, task_queues());
1075   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
1076   ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
1077 
1078   ShenandoahTraversalRefProcTaskExecutor executor(workers);
1079 
1080   ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
1081   if (!_heap->is_degenerated_gc_in_progress()) {
1082     ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
1083     rp->process_discovered_references(&is_alive, &keep_alive,
1084                                       &complete_gc, &executor,
1085                                       &pt);
1086   } else {
1087     ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
1088     rp->process_discovered_references(&is_alive, &keep_alive,
1089                                       &complete_gc, &executor,
1090                                       &pt);
1091   }
1092 
1093   pt.print_all_references();
1094   assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
1095 }