1 /*
   2  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  32 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  33 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  41 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  43 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  44 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  45 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  46 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  47 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  48 #include "gc/shenandoah/shenandoahUtils.hpp"
  49 #include "gc/shenandoah/shenandoahVerifier.hpp"
  50 
  51 #include "memory/iterator.hpp"
  52 #include "memory/metaspace.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "memory/universe.hpp"
  55 
  56 /**
  57  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  58  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  59  * is incremental-update-based.
  60  *
  61  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  62  * several reasons:
  63  * - We will not reclaim them in this cycle anyway, because they are not in the
  64  *   cset
  65  * - It makes up for the bulk of work during final-pause
  66  * - It also shortens the concurrent cycle because we don't need to
  67  *   pointlessly traverse through newly allocated objects.
  68  * - As a nice side-effect, it solves the I-U termination problem (mutators
  69  *   cannot outrun the GC by allocating like crazy)
  70  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  72  *   achieves the same, but without extra barriers. I think the effect of
  73  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  74  *   particular, we will not see the head of a completely new long linked list
  75  *   in final-pause and end up traversing huge chunks of the heap there.
  76  * - We don't need to see/update the fields of new objects either, because they
  77  *   are either still null, or anything that's been stored into them has been
  78  *   evacuated+enqueued before (and will thus be treated later).
  79  *
  80  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  82  *
  83  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  85  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  86  *   them for cset. This means that we need to protect such regions from
  87  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  88  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  89  *   code.
  90  * - We *need* to traverse through evacuated objects. Those objects are
  91  *   pre-existing, and any references in them point to interesting objects that
  92  *   we need to see. We also want to count them as live, because we just
  93  *   determined that they are alive :-) I achieve this by upping TAMS
  94  *   concurrently for every gclab/gc-shared alloc before publishing the
  95  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  97  */
  98 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  99 private:
 100   ShenandoahObjToScanQueue* _queue;
 101   ShenandoahTraversalGC* _traversal_gc;
 102   ShenandoahHeap* const _heap;
 103 
 104 public:
 105   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 106     _queue(q),
 107     _heap(ShenandoahHeap::heap())
 108  { }
 109 
 110   void do_buffer(void** buffer, size_t size) {
 111     for (size_t i = 0; i < size; ++i) {
 112       oop* p = (oop*) &buffer[i];
 113       oop obj = RawAccess<>::oop_load(p);
 114       shenandoah_assert_not_forwarded(p, obj);
 115       if (_heap->marking_context()->mark(obj)) {
 116         _queue->push(ShenandoahMarkTask(obj));
 117       }
 118     }
 119   }
 120 };
 121 
 122 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 123 private:
 124   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 125 
 126 public:
 127   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 128     _satb_cl(satb_cl) {}
 129 
 130   void do_thread(Thread* thread) {
 131     ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
 132   }
 133 };
 134 
 135 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 136 // and remark them later during final-traversal.
 137 class ShenandoahMarkCLDClosure : public CLDClosure {
 138 private:
 139   OopClosure* _cl;
 140 public:
 141   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 142   void do_cld(ClassLoaderData* cld) {
 143     cld->oops_do(_cl, true, true);
 144   }
 145 };
 146 
 147 // Like CLDToOopClosure, but only process modified CLDs
 148 class ShenandoahRemarkCLDClosure : public CLDClosure {
 149 private:
 150   OopClosure* _cl;
 151 public:
 152   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 153   void do_cld(ClassLoaderData* cld) {
 154     if (cld->has_modified_oops()) {
 155       cld->oops_do(_cl, true, true);
 156     }
 157   }
 158 };
 159 
 160 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
 161 private:
 162   ShenandoahCSetRootScanner* _rp;
 163   ShenandoahHeap* _heap;
 164   ShenandoahCsetCodeRootsIterator* _cset_coderoots;
 165   ShenandoahStringDedupRoots       _dedup_roots;
 166 
 167 public:
 168   ShenandoahInitTraversalCollectionTask(ShenandoahCSetRootScanner* rp) :
 169     AbstractGangTask("Shenandoah Init Traversal Collection"),
 170     _rp(rp),
 171     _heap(ShenandoahHeap::heap()) {}
 172 
 173   void work(uint worker_id) {
 174     ShenandoahParallelWorkerSession worker_session(worker_id);
 175 
 176     ShenandoahEvacOOMScope oom_evac_scope;
 177     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
 178     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 179 
 180     bool process_refs = _heap->process_references();
 181     bool unload_classes = _heap->unload_classes();
 182     ReferenceProcessor* rp = NULL;
 183     if (process_refs) {
 184       rp = _heap->ref_processor();
 185     }
 186 
 187     // Step 1: Process ordinary GC roots.
 188     {
 189       ShenandoahTraversalClosure roots_cl(q, rp);
 190       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
 191       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 192       if (unload_classes) {
 193         _rp->roots_do(worker_id, &roots_cl, NULL, &code_cl);
 194       } else {
 195         _rp->roots_do(worker_id, &roots_cl, &cld_cl, &code_cl);
 196       }
 197 
 198       AlwaysTrueClosure is_alive;
 199       _dedup_roots.oops_do(&is_alive, &roots_cl, worker_id);
 200     }
 201   }
 202 };
 203 
 204 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 205 private:
 206   ShenandoahTaskTerminator* _terminator;
 207   ShenandoahHeap* _heap;
 208 public:
 209   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
 210     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 211     _terminator(terminator),
 212     _heap(ShenandoahHeap::heap()) {}
 213 
 214   void work(uint worker_id) {
 215     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 216     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 217     ShenandoahEvacOOMScope oom_evac_scope;
 218     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 219 
 220     // Drain all outstanding work in queues.
 221     traversal_gc->main_loop(worker_id, _terminator, true);
 222   }
 223 };
 224 
// Worker task for the final-traversal pause: drains leftover SATB buffers,
// rescans roots (remark-style), and finishes all queued traversal work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahAllRootScanner* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    // Traversal evacuates as it marks; guard against evac OOM.
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    // The degenerated path uses the *Degen closure variants; otherwise the
    // two branches are structurally identical.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Only strong roots; weak roots are handled by class unloading.
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    } else {
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 2: Finally drain all outstanding work in queues.
      // 'false': no need to yield to the suspendible thread set at a safepoint.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
 300 
 301 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 302   _heap(heap),
 303   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 304   _traversal_set(ShenandoahHeapRegionSet()) {
 305 
 306   // Traversal does not support concurrent code root scanning
 307   FLAG_SET_DEFAULT(ShenandoahConcurrentScanCodeRoots, false);
 308 
 309   uint num_queues = heap->max_workers();
 310   for (uint i = 0; i < num_queues; ++i) {
 311     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 312     task_queue->initialize();
 313     _task_queues->register_queue(i, task_queue);
 314   }
 315 }
 316 
// Empty destructor. NOTE(review): _task_queues and the queues allocated in the
// constructor are not freed here -- presumably this object lives for the VM
// lifetime; confirm before relying on destruction.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
 319 
 320 void ShenandoahTraversalGC::prepare_regions() {
 321   size_t num_regions = _heap->num_regions();
 322   ShenandoahMarkingContext* const ctx = _heap->marking_context();
 323   for (size_t i = 0; i < num_regions; i++) {
 324     ShenandoahHeapRegion* region = _heap->get_region(i);
 325     if (_heap->is_bitmap_slice_committed(region)) {
 326       if (_traversal_set.is_in(i)) {
 327         ctx->capture_top_at_mark_start(region);
 328         region->clear_live_data();
 329         assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
 330       } else {
 331         // Everything outside the traversal set is always considered live.
 332         ctx->reset_top_at_mark_start(region);
 333       }
 334     } else {
 335       // FreeSet may contain uncommitted empty regions, once they are recommitted,
 336       // their TAMS may have old values, so reset them here.
 337       ctx->reset_top_at_mark_start(region);
 338     }
 339   }
 340 }
 341 
// Prepares the heap for a traversal cycle: retires TLABs, chooses the
// collection set, sets up per-region TAMS, and rebuilds the free set.
// Called under the heap lock at a safepoint (see init_traversal_collection).
void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  // Marking bitmap must be clean and the context incomplete before we start.
  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  // Set up TAMS for all regions after the cset is known.
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
}
 372 
// Init-traversal safepoint operation: verifies (optionally), prepares the
// heap/cset, enables reference discovery, and runs the init root-scan task
// that seeds the traversal queues.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  // Arm reference discovery before any roots are scanned.
  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahCSetRootScanner rp(nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
      ShenandoahInitTraversalCollectionTask traversal_task(&rp);
      _heap->workers()->run_task(&traversal_task);
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}
 425 
// Per-worker traversal driver. Picks the closure specialization matching the
// current mode -- (degenerated?) x (unload classes?) x (string dedup?) -- and
// runs the templated work loop with it, so the hot path is monomorphic.
//   w         - worker id (selects queue and liveness cache)
//   t         - termination protocol shared by all workers
//   sts_yield - whether to yield to the suspendible thread set (true when
//               running concurrently, false at a safepoint)
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      // Degenerated mode: same matrix, using the *Degen closure variants.
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  // Publish this worker's liveness counts back to the regions.
  _heap->flush_liveness_cache(w);
}
 478 
// Templated traversal work loop for one worker. Phase 1 drains any orphaned
// queues (claimed round-robin); Phase 2 alternates between draining SATB
// buffers, popping/stealing marking tasks, and offering termination when no
// work was found. Returns early if the GC is cancelled.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  // Reuses the concurrent-mark task machinery (do_task) for object scanning.
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Check cancellation only every 'stride' tasks to keep the hot loop tight.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Pull completed SATB buffers into the local queue first: mutators keep
    // producing these concurrently.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      // Leave the evac-OOM scope and the suspendible set while blocked in
      // termination, so we neither hold up OOM handling nor safepoints.
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}
 544 
 545 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
 546   if (_heap->cancelled_gc()) {
 547     return true;
 548   }
 549   return false;
 550 }
 551 
// Concurrent traversal phase: runs the worker gang through the traversal
// queues, then (if enabled and not cancelled) precleans weak references.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  // Preclean only when still running, precleaning is enabled, and refs are processed.
  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}
 568 
// Final-traversal safepoint operation: finishes all traversal work, processes
// weak references, fixes up roots, then trashes immediately-reclaimable
// regions and rebuilds the free set. All steps are skipped once the GC is
// cancelled.
void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahAllRootScanner rp(nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    fixup_roots();
    _heap->parallel_cleaning(false);
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // top == TAMS means the region saw no allocations during the cycle;
        // regions with post-TAMS allocations must not be trashed (their
        // objects are implicitly live -- see the note at the top of the file).
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Also trash all continuation regions of this humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}
 666 
 667 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 668 private:
 669   template <class T>
 670   inline void do_oop_work(T* p) {
 671     T o = RawAccess<>::oop_load(p);
 672     if (!CompressedOops::is_null(o)) {
 673       oop obj = CompressedOops::decode_not_null(o);
 674       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 675       if (!oopDesc::equals_raw(obj, forw)) {
 676         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 677       }
 678     }
 679   }
 680 
 681 public:
 682   inline void do_oop(oop* p) { do_oop_work(p); }
 683   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 684 };
 685 
 686 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 687 private:
 688   ShenandoahRootUpdater* _rp;
 689 
 690 public:
 691   ShenandoahTraversalFixRootsTask(ShenandoahRootUpdater* rp) :
 692     AbstractGangTask("Shenandoah traversal fix roots"),
 693     _rp(rp) {
 694     assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
 695   }
 696 
 697   void work(uint worker_id) {
 698     ShenandoahParallelWorkerSession worker_session(worker_id);
 699     ShenandoahTraversalFixRootsClosure cl;
 700     ShenandoahForwardedIsAliveClosure is_alive;
 701     _rp->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalFixRootsClosure>(worker_id, &is_alive, &cl);
 702   }
 703 };
 704 
// Runs the parallel root fixup pass so that all roots (including code cache)
// point at forwarded copies; brackets it with derived-pointer bookkeeping for
// C2/JVMCI-compiled frames.
void ShenandoahTraversalGC::fixup_roots() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootUpdater rp(_heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots, true /* update code cache */);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
 716 
// Discards any remaining traversal work, e.g. after finishing or abandoning a cycle.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
 720 
// Accessor for the traversal queue set (one queue per max worker).
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 724 
 725 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 726 private:
 727   ShenandoahHeap* const _heap;
 728 public:
 729   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 730   virtual bool should_return() { return _heap->cancelled_gc(); }
 731 };
 732 
 733 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 734 public:
 735   void do_void() {
 736     ShenandoahHeap* sh = ShenandoahHeap::heap();
 737     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 738     assert(sh->process_references(), "why else would we be here?");
 739     ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
 740     shenandoah_assert_rp_isalive_installed();
 741     traversal_gc->main_loop((uint) 0, &terminator, true);
 742   }
 743 };
 744 
// Keep-alive oop closure (non-degenerated, no dedup): routes each oop through
// the traversal process_oop path, updating the slot and enqueueing as needed.
// NOTE(review): presumably used by reference processing below/elsewhere --
// usage is outside this chunk; confirm against the callers.
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;   // cached at construction; closure is single-thread use
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 766 
// Degenerated-mode variant of ShenandoahTraversalKeepAliveUpdateClosure:
// identical except process_oop is instantiated with degen = true.
class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;   // cached at construction; closure is single-thread use
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 788 
 789 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
 790 private:
 791   ShenandoahObjToScanQueue* _queue;
 792   Thread* _thread;
 793   ShenandoahTraversalGC* _traversal_gc;
 794   ShenandoahMarkingContext* const _mark_context;
 795 
 796   template <class T>
 797   inline void do_oop_work(T* p) {
 798     ShenandoahEvacOOMScope evac_scope;
 799     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
 800   }
 801 
 802 public:
 803   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 804           _queue(q), _thread(Thread::current()),
 805           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 806           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 807 
 808   void do_oop(narrowOop* p) { do_oop_work(p); }
 809   void do_oop(oop* p)       { do_oop_work(p); }
 810 };
 811 
 812 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
 813 private:
 814   ShenandoahObjToScanQueue* _queue;
 815   Thread* _thread;
 816   ShenandoahTraversalGC* _traversal_gc;
 817   ShenandoahMarkingContext* const _mark_context;
 818 
 819   template <class T>
 820   inline void do_oop_work(T* p) {
 821     ShenandoahEvacOOMScope evac_scope;
 822     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
 823   }
 824 
 825 public:
 826   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 827           _queue(q), _thread(Thread::current()),
 828           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 829           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 830 
 831   void do_oop(narrowOop* p) { do_oop_work(p); }
 832   void do_oop(oop* p)       { do_oop_work(p); }
 833 };
 834 
 835 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
 836 private:
 837   ReferenceProcessor* _rp;
 838 
 839 public:
 840   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
 841           AbstractGangTask("Precleaning task"),
 842           _rp(rp) {}
 843 
 844   void work(uint worker_id) {
 845     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
 846     ShenandoahParallelWorkerSession worker_session(worker_id);
 847     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 848     ShenandoahEvacOOMScope oom_evac_scope;
 849 
 850     ShenandoahHeap* sh = ShenandoahHeap::heap();
 851 
 852     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
 853 
 854     ShenandoahForwardedIsAliveClosure is_alive;
 855     ShenandoahTraversalCancelledGCYieldClosure yield;
 856     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 857     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
 858     ResourceMark rm;
 859     _rp->preclean_discovered_references(&is_alive, &keep_alive,
 860                                         &complete_gc, &yield,
 861                                         NULL);
 862   }
 863 };
 864 
// Precleans discovered weak references concurrently, before the final-mark
// safepoint, so the STW reference-processing phase has less to do.
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning below runs on a single worker; temporarily disable MT
  // discovery for its duration (restored when the mutator goes out of scope).
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Install the traversal is-alive closure into RP for the duration of
  // precleaning; must not already be installed.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues drain fully unless the cycle was cancelled mid-preclean.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 905 
 906 // Weak Reference Closures
 907 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 908   uint _worker_id;
 909   ShenandoahTaskTerminator* _terminator;
 910   bool _reset_terminator;
 911 
 912 public:
 913   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 914     _worker_id(worker_id),
 915     _terminator(t),
 916     _reset_terminator(reset_terminator) {
 917   }
 918 
 919   void do_void() {
 920     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 921 
 922     ShenandoahHeap* sh = ShenandoahHeap::heap();
 923     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 924     assert(sh->process_references(), "why else would we be here?");
 925     shenandoah_assert_rp_isalive_installed();
 926 
 927     traversal_gc->main_loop(_worker_id, _terminator, false);
 928 
 929     if (_reset_terminator) {
 930       _terminator->reset_for_reuse();
 931     }
 932   }
 933 };
 934 
 935 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
 936   uint _worker_id;
 937   ShenandoahTaskTerminator* _terminator;
 938   bool _reset_terminator;
 939 
 940 public:
 941   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 942           _worker_id(worker_id),
 943           _terminator(t),
 944           _reset_terminator(reset_terminator) {
 945   }
 946 
 947   void do_void() {
 948     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 949 
 950     ShenandoahHeap* sh = ShenandoahHeap::heap();
 951     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 952     assert(sh->process_references(), "why else would we be here?");
 953     shenandoah_assert_rp_isalive_installed();
 954 
 955     ShenandoahEvacOOMScope evac_scope;
 956     traversal_gc->main_loop(_worker_id, _terminator, false);
 957 
 958     if (_reset_terminator) {
 959       _terminator->reset_for_reuse();
 960     }
 961   }
 962 };
 963 
 964 void ShenandoahTraversalGC::weak_refs_work() {
 965   assert(_heap->process_references(), "sanity");
 966 
 967   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
 968 
 969   ShenandoahGCPhase phase(phase_root);
 970 
 971   ReferenceProcessor* rp = _heap->ref_processor();
 972 
 973   // NOTE: We cannot shortcut on has_discovered_references() here, because
 974   // we will miss marking JNI Weak refs then, see implementation in
 975   // ReferenceProcessor::process_discovered_references.
 976   weak_refs_work_doit();
 977 
 978   rp->verify_no_references_recorded();
 979   assert(!rp->discovery_enabled(), "Post condition");
 980 
 981 }
 982 
 983 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
 984 private:
 985   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
 986   ShenandoahTaskTerminator* _terminator;
 987 
 988 public:
 989   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
 990                                       ShenandoahTaskTerminator* t) :
 991     AbstractGangTask("Process reference objects in parallel"),
 992     _proc_task(proc_task),
 993     _terminator(t) {
 994   }
 995 
 996   void work(uint worker_id) {
 997     ShenandoahEvacOOMScope oom_evac_scope;
 998     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 999     ShenandoahHeap* heap = ShenandoahHeap::heap();
1000     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1001 
1002     ShenandoahForwardedIsAliveClosure is_alive;
1003     if (!heap->is_degenerated_gc_in_progress()) {
1004       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1005       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1006     } else {
1007       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1008       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1009     }
1010   }
1011 };
1012 
1013 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1014 private:
1015   WorkGang* _workers;
1016 
1017 public:
1018   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
1019 
1020   // Executes a task using worker threads.
1021   void execute(ProcessTask& task, uint ergo_workers) {
1022     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1023 
1024     ShenandoahHeap* heap = ShenandoahHeap::heap();
1025     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1026     ShenandoahPushWorkerQueuesScope scope(_workers,
1027                                           traversal_gc->task_queues(),
1028                                           ergo_workers,
1029                                           /* do_check = */ false);
1030     uint nworkers = _workers->active_workers();
1031     traversal_gc->task_queues()->reserve(nworkers);
1032     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1033     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1034     _workers->run_task(&proc_task_proxy);
1035   }
1036 };
1037 
// Runs the STW reference-processing phase proper: installs the is-alive
// closure, sets RP policy and parallelism, then processes discovered
// references with the keep-alive flavor matching the cycle kind
// (concurrent vs. degenerated).
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  // Install the traversal is-alive closure into RP for the duration of this
  // phase; must not already be installed.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  // Soft-reference clearing policy comes from the heap's soft-ref policy;
  // MT degree matches the currently active workers.
  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
  // Attach queue 0 to one worker for the serial path; parallel phases push
  // their own queues via the executor below.
  ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);

  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
  if (!_heap->is_degenerated_gc_in_progress()) {
    ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  } else {
    ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  }

  pt.print_all_references();
  // Queues drain fully unless the cycle was cancelled during processing.
  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
}