1 /*
   2  * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  32 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  33 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  41 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  43 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  44 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  45 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  46 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  47 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  48 #include "gc/shenandoah/shenandoahUtils.hpp"
  49 #include "gc/shenandoah/shenandoahVerifier.hpp"
  50 
  51 #include "memory/iterator.hpp"
  52 #include "memory/metaspace.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "memory/universe.hpp"
  55 
  56 /**
  57  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  58  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  59  * is incremental-update-based.
  60  *
  61  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  62  * several reasons:
  63  * - We will not reclaim them in this cycle anyway, because they are not in the
  64  *   cset
  65  * - It makes up for the bulk of work during final-pause
  66  * - It also shortens the concurrent cycle because we don't need to
  67  *   pointlessly traverse through newly allocated objects.
  68  * - As a nice side-effect, it solves the I-U termination problem (mutators
  69  *   cannot outrun the GC by allocating like crazy)
  70  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  72  *   achieves the same, but without extra barriers. I think the effect of
  73  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  74  *   particular, we will not see the head of a completely new long linked list
  75  *   in final-pause and end up traversing huge chunks of the heap there.
  76  * - We don't need to see/update the fields of new objects either, because they
  77  *   are either still null, or anything that's been stored into them has been
  78  *   evacuated+enqueued before (and will thus be treated later).
  79  *
  80  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  82  *
  83  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  85  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  86  *   them for cset. This means that we need to protect such regions from
  87  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  88  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  89  *   code.
  90  * - We *need* to traverse through evacuated objects. Those objects are
  91  *   pre-existing, and any references in them point to interesting objects that
  92  *   we need to see. We also want to count them as live, because we just
  93  *   determined that they are alive :-) I achieve this by upping TAMS
  94  *   concurrently for every gclab/gc-shared alloc before publishing the
  95  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  97  */
  98 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  99 private:
 100   ShenandoahObjToScanQueue* _queue;
 101   ShenandoahTraversalGC* _traversal_gc;
 102   ShenandoahHeap* const _heap;
 103 
 104 public:
 105   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 106     _queue(q),
 107     _heap(ShenandoahHeap::heap())
 108  { }
 109 
 110   void do_buffer(void** buffer, size_t size) {
 111     for (size_t i = 0; i < size; ++i) {
 112       oop* p = (oop*) &buffer[i];
 113       oop obj = RawAccess<>::oop_load(p);
 114       shenandoah_assert_not_forwarded(p, obj);
 115       if (_heap->marking_context()->mark(obj)) {
 116         _queue->push(ShenandoahMarkTask(obj));
 117       }
 118     }
 119   }
 120 };
 121 
 122 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 123 private:
 124   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 125 
 126 public:
 127   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 128     _satb_cl(satb_cl) {}
 129 
 130   void do_thread(Thread* thread) {
 131     ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
 132   }
 133 };
 134 
 135 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 136 // and remark them later during final-traversal.
 137 class ShenandoahMarkCLDClosure : public CLDClosure {
 138 private:
 139   OopClosure* _cl;
 140 public:
 141   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 142   void do_cld(ClassLoaderData* cld) {
 143     cld->oops_do(_cl, ClassLoaderData::_claim_strong, true);
 144   }
 145 };
 146 
 147 // Like CLDToOopClosure, but only process modified CLDs
 148 class ShenandoahRemarkCLDClosure : public CLDClosure {
 149 private:
 150   OopClosure* _cl;
 151 public:
 152   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 153   void do_cld(ClassLoaderData* cld) {
 154     if (cld->has_modified_oops()) {
 155       cld->oops_do(_cl, ClassLoaderData::_claim_strong, true);
 156     }
 157   }
 158 };
 159 
 160 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
 161 private:
 162   ShenandoahCSetRootScanner* _rp;
 163   ShenandoahHeap* _heap;
 164   ShenandoahCsetCodeRootsIterator* _cset_coderoots;
 165   ShenandoahStringDedupRoots       _dedup_roots;
 166 
 167 public:
 168   ShenandoahInitTraversalCollectionTask(ShenandoahCSetRootScanner* rp) :
 169     AbstractGangTask("Shenandoah Init Traversal Collection"),
 170     _rp(rp),
 171     _heap(ShenandoahHeap::heap()) {}
 172 
 173   void work(uint worker_id) {
 174     ShenandoahParallelWorkerSession worker_session(worker_id);
 175 
 176     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
 177     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 178 
 179     bool process_refs = _heap->process_references();
 180     bool unload_classes = _heap->unload_classes();
 181     ReferenceProcessor* rp = NULL;
 182     if (process_refs) {
 183       rp = _heap->ref_processor();
 184     }
 185 
 186     // Step 1: Process ordinary GC roots.
 187     {
 188       ShenandoahTraversalRootsClosure roots_cl(q, rp);
 189       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
 190       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 191       if (unload_classes) {
 192         _rp->roots_do(worker_id, &roots_cl, NULL, &code_cl);
 193       } else {
 194         _rp->roots_do(worker_id, &roots_cl, &cld_cl, &code_cl);
 195       }
 196     }
 197   }
 198 };
 199 
 200 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 201 private:
 202   ShenandoahTaskTerminator* _terminator;
 203   ShenandoahHeap* _heap;
 204 public:
 205   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
 206     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 207     _terminator(terminator),
 208     _heap(ShenandoahHeap::heap()) {}
 209 
 210   void work(uint worker_id) {
 211     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 212     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 213     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 214 
 215     // Drain all outstanding work in queues.
 216     traversal_gc->main_loop(worker_id, _terminator, true);
 217   }
 218 };
 219 
// Parallel task for the final-traversal pause: drains remaining SATB buffers,
// rescans GC roots (remark), and finishes all queued traversal work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;          // scans all root categories
  ShenandoahTaskTerminator* _terminator;  // coordinates worker termination
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahAllRootScanner* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    // Reference processor is only consulted when reference processing is on.
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    // The two branches differ only in the roots closure type: normal vs. degenerated-GC flavor.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalRootsClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Only re-scan CLDs whose modified-oops flag was set during traversal.
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    } else {
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 3: Finally drain all outstanding work in queues.
      // ('false' — no STS yielding; we are inside a safepoint.)
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
 294 
 295 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 296   _heap(heap),
 297   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 298   _traversal_set(ShenandoahHeapRegionSet()) {
 299 
 300   // Traversal does not support concurrent code root scanning
 301   FLAG_SET_DEFAULT(ShenandoahConcurrentScanCodeRoots, false);
 302 
 303   uint num_queues = heap->max_workers();
 304   for (uint i = 0; i < num_queues; ++i) {
 305     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 306     task_queue->initialize();
 307     _task_queues->register_queue(i, task_queue);
 308   }
 309 }
 310 
// Nothing to tear down explicitly; queues live for the lifetime of the heap.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
 313 
// Establishes per-region TAMS before the cycle starts: regions in the
// traversal set get TAMS captured at current top (so later allocations are
// implicitly marked) and their live data cleared; all other regions get
// TAMS reset.
void ShenandoahTraversalGC::prepare_regions() {
  size_t num_regions = _heap->num_regions();
  ShenandoahMarkingContext* const ctx = _heap->marking_context();
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = _heap->get_region(i);
    if (_heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        // Region participates in traversal: mark from a clean slate.
        ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        ctx->reset_top_at_mark_start(region);
      }
    } else {
      // FreeSet may contain uncommitted empty regions, once they are recommitted,
      // their TAMS may have old values, so reset them here.
      ctx->reset_top_at_mark_start(region);
    }
  }
}
 335 
// Pre-cycle preparation (runs at the init-traversal pause): makes the heap
// parsable, resizes TLABs, syncs pinned regions, chooses the collection set,
// sets region TAMS, and rebuilds the free set.
void ShenandoahTraversalGC::prepare() {
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  // About to choose the collection set, make sure we know which regions are pinned.
  {
    ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_prepare_sync_pinned);
    _heap->sync_pinned_region_status();
  }

  ShenandoahCollectionSet* collection_set = _heap->collection_set();
  {
    // Heap lock protects the collection set and free set mutations below.
    ShenandoahHeapLocker lock(_heap->lock());

    collection_set->clear();
    assert(collection_set->count() == 0, "collection set not clear");

    // Find collection set
    _heap->heuristics()->choose_collection_set(collection_set);
    prepare_regions();

    // Rebuild free set
    _heap->free_set()->rebuild();
  }

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s, " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions",
                     byte_size_in_proper_unit(collection_set->garbage()),   proper_unit_for_byte_size(collection_set->garbage()),
                     byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()),
                     collection_set->count());
}
 376 
// Init-traversal pause: verifies the heap (optionally), prepares regions and
// the collection set, flips traversal/forwarded flags, configures reference
// discovery, and seeds the traversal queues from the GC roots.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    prepare();
  }

  // From here on, barriers see traversal in progress and forwarded objects.
  _heap->set_concurrent_traversal_in_progress(true);
  _heap->set_has_forwarded_objects(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    // Soft-ref clearing policy follows the heap's should-clear decision.
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      // Scan roots in parallel; results land in the traversal queues.
      ShenandoahCSetRootScanner rp(nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
      ShenandoahInitTraversalCollectionTask traversal_task(&rp);
      _heap->workers()->run_task(&traversal_task);
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}
 429 
// Per-worker traversal driver. Selects the statically-specialized closure
// (2x2x2 combinations: degenerated-or-not x class-unloading-or-not x
// string-dedup-or-not) and runs main_loop_work with it, so the hot loop is
// monomorphic. 'w' is the worker id; 'sts_yield' permits yielding to the
// suspendible thread set (true for concurrent phase, false in pauses).
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      // Degenerated-GC flavors of the same four combinations.
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  // Publish this worker's accumulated liveness back to the regions.
  _heap->flush_liveness_cache(w);
}
 482 
// Core traversal work loop, specialized on closure type T. Two phases:
// (1) drain any orphaned queues claimed from the queue set; (2) steady-state
// loop interleaving SATB buffer draining, own-queue popping, and work
// stealing, offering termination when a full stride finds no work.
// Checks for GC cancellation between strides.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Number of tasks processed between cancellation/termination checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        // This claimed queue is drained; move on to the next one (NULL ends the loop).
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Drain completed SATB buffers first, so newly recorded oops become tasks.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer own queue; fall back to stealing from other workers.
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      // Leave the STS while parked in termination so safepoints are not blocked.
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}
 547 
 548 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
 549   if (_heap->cancelled_gc()) {
 550     return true;
 551   }
 552   return false;
 553 }
 554 
// Concurrent traversal phase: runs the parallel traversal task until
// termination (or cancellation), then optionally precleans weak references.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  // Preclean only if the cycle survived and reference processing is enabled.
  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}
 571 
// Final-traversal pause: finishes traversal work, processes weak references,
// fixes up roots, unloads classes/nmethods, and reclaims the collection set.
// Each stage is guarded by a cancellation check so a cancelled cycle degrades
// cleanly into a degenerated/full GC.
void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahAllRootScanner rp(nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->set_concurrent_traversal_in_progress(false);
    _heap->mark_complete_marking_context();

    fixup_roots();
    _heap->parallel_cleaning(false);

    _heap->set_has_forwarded_objects(false);

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Need to see that pinned region status is updated: newly pinned regions must not
    // be trashed. New unpinned regions should be trashed.
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_sync_pinned);
      _heap->sync_pinned_region_status();
    }

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // TAMS == top means nothing was allocated in the region during the cycle;
        // this protects alloc-regions from being trashed (see header comment).
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        // Candidate for trashing: traversed, no live data, and untouched by allocation.
        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Trash the continuation regions that belong to this humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}
 676 
 677 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 678 private:
 679   template <class T>
 680   inline void do_oop_work(T* p) {
 681     T o = RawAccess<>::oop_load(p);
 682     if (!CompressedOops::is_null(o)) {
 683       oop obj = CompressedOops::decode_not_null(o);
 684       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 685       if (obj != forw) {
 686         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 687       }
 688     }
 689   }
 690 
 691 public:
 692   inline void do_oop(oop* p) { do_oop_work(p); }
 693   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 694 };
 695 
 696 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 697 private:
 698   ShenandoahRootUpdater* _rp;
 699 
 700 public:
 701   ShenandoahTraversalFixRootsTask(ShenandoahRootUpdater* rp) :
 702     AbstractGangTask("Shenandoah traversal fix roots"),
 703     _rp(rp) {
 704     assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
 705   }
 706 
 707   void work(uint worker_id) {
 708     ShenandoahParallelWorkerSession worker_session(worker_id);
 709     ShenandoahTraversalFixRootsClosure cl;
 710     _rp->strong_roots_do(worker_id, &cl);
 711   }
 712 };
 713 
// Updates all strong roots to point past forwarded objects, bracketed by
// derived-pointer table maintenance for C2/JVMCI-compiled frames.
void ShenandoahTraversalGC::fixup_roots() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootUpdater rp(_heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
 725 
// Discards any leftover queue contents, readying the queues for the next cycle.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
 729 
// Accessor for the per-worker traversal queue set.
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 733 
 734 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 735 private:
 736   ShenandoahHeap* const _heap;
 737 public:
 738   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 739   virtual bool should_return() { return _heap->cancelled_gc(); }
 740 };
 741 
 742 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 743 public:
 744   void do_void() {
 745     ShenandoahHeap* sh = ShenandoahHeap::heap();
 746     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 747     assert(sh->process_references(), "why else would we be here?");
 748     ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
 749     shenandoah_assert_rp_isalive_installed();
 750     traversal_gc->main_loop((uint) 0, &terminator, true);
 751   }
 752 };
 753 
// Keep-alive closure for reference processing during normal (non-degenerated)
// traversal: processes each oop with atomic slot updates, no string dedup.
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    // Template flags: no string dedup, not degenerated, atomic slot update.
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 775 
// Keep-alive closure for reference processing during degenerated traversal:
// single-threaded context, so slot updates need not be atomic.
class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    // Template flags: no string dedup, degenerated mode, non-atomic slot update.
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 797 
 798 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
 799 private:
 800   ShenandoahObjToScanQueue* _queue;
 801   Thread* _thread;
 802   ShenandoahTraversalGC* _traversal_gc;
 803   ShenandoahMarkingContext* const _mark_context;
 804 
 805   template <class T>
 806   inline void do_oop_work(T* p) {
 807     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
 808   }
 809 
 810 public:
 811   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 812           _queue(q), _thread(Thread::current()),
 813           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 814           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 815 
 816   void do_oop(narrowOop* p) { do_oop_work(p); }
 817   void do_oop(oop* p)       { do_oop_work(p); }
 818 };
 819 
 820 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
 821 private:
 822   ShenandoahObjToScanQueue* _queue;
 823   Thread* _thread;
 824   ShenandoahTraversalGC* _traversal_gc;
 825   ShenandoahMarkingContext* const _mark_context;
 826 
 827   template <class T>
 828   inline void do_oop_work(T* p) {
 829     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
 830   }
 831 
 832 public:
 833   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 834           _queue(q), _thread(Thread::current()),
 835           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 836           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 837 
 838   void do_oop(narrowOop* p) { do_oop_work(p); }
 839   void do_oop(oop* p)       { do_oop_work(p); }
 840 };
 841 
 842 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
 843 private:
 844   ReferenceProcessor* _rp;
 845 
 846 public:
 847   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
 848           AbstractGangTask("Precleaning task"),
 849           _rp(rp) {}
 850 
 851   void work(uint worker_id) {
 852     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
 853     ShenandoahParallelWorkerSession worker_session(worker_id);
 854     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 855 
 856     ShenandoahHeap* sh = ShenandoahHeap::heap();
 857 
 858     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
 859 
 860     ShenandoahForwardedIsAliveClosure is_alive;
 861     ShenandoahTraversalCancelledGCYieldClosure yield;
 862     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 863     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
 864     ResourceMark rm;
 865     _rp->preclean_discovered_references(&is_alive, &keep_alive,
 866                                         &complete_gc, &yield,
 867                                         NULL);
 868   }
 869 };
 870 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning below is single-threaded; disable MT discovery for its duration
  // (restored when this mutator goes out of scope).
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Install the traversal is-alive closure into RP; uninstalled on scope exit.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues must be drained afterwards, unless the GC was cancelled mid-way.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 911 
 912 // Weak Reference Closures
 913 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 914   uint _worker_id;
 915   ShenandoahTaskTerminator* _terminator;
 916   bool _reset_terminator;
 917 
 918 public:
 919   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 920     _worker_id(worker_id),
 921     _terminator(t),
 922     _reset_terminator(reset_terminator) {
 923   }
 924 
 925   void do_void() {
 926     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 927 
 928     ShenandoahHeap* sh = ShenandoahHeap::heap();
 929     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 930     assert(sh->process_references(), "why else would we be here?");
 931     shenandoah_assert_rp_isalive_installed();
 932 
 933     traversal_gc->main_loop(_worker_id, _terminator, false);
 934 
 935     if (_reset_terminator) {
 936       _terminator->reset_for_reuse();
 937     }
 938   }
 939 };
 940 
 941 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
 942   uint _worker_id;
 943   ShenandoahTaskTerminator* _terminator;
 944   bool _reset_terminator;
 945 
 946 public:
 947   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 948           _worker_id(worker_id),
 949           _terminator(t),
 950           _reset_terminator(reset_terminator) {
 951   }
 952 
 953   void do_void() {
 954     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 955 
 956     ShenandoahHeap* sh = ShenandoahHeap::heap();
 957     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 958     assert(sh->process_references(), "why else would we be here?");
 959     shenandoah_assert_rp_isalive_installed();
 960 
 961     traversal_gc->main_loop(_worker_id, _terminator, false);
 962 
 963     if (_reset_terminator) {
 964       _terminator->reset_for_reuse();
 965     }
 966   }
 967 };
 968 
 969 void ShenandoahTraversalGC::weak_refs_work() {
 970   assert(_heap->process_references(), "sanity");
 971 
 972   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
 973 
 974   ShenandoahGCPhase phase(phase_root);
 975 
 976   ReferenceProcessor* rp = _heap->ref_processor();
 977 
 978   // NOTE: We cannot shortcut on has_discovered_references() here, because
 979   // we will miss marking JNI Weak refs then, see implementation in
 980   // ReferenceProcessor::process_discovered_references.
 981   weak_refs_work_doit();
 982 
 983   rp->verify_no_references_recorded();
 984   assert(!rp->discovery_enabled(), "Post condition");
 985 
 986 }
 987 
 988 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
 989 private:
 990   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
 991   ShenandoahTaskTerminator* _terminator;
 992 
 993 public:
 994   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
 995                                       ShenandoahTaskTerminator* t) :
 996     AbstractGangTask("Process reference objects in parallel"),
 997     _proc_task(proc_task),
 998     _terminator(t) {
 999   }
1000 
1001   void work(uint worker_id) {
1002     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1003     ShenandoahHeap* heap = ShenandoahHeap::heap();
1004     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1005 
1006     ShenandoahForwardedIsAliveClosure is_alive;
1007     if (!heap->is_degenerated_gc_in_progress()) {
1008       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1009       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1010     } else {
1011       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1012       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1013     }
1014   }
1015 };
1016 
1017 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1018 private:
1019   WorkGang* _workers;
1020 
1021 public:
1022   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
1023 
1024   // Executes a task using worker threads.
1025   void execute(ProcessTask& task, uint ergo_workers) {
1026     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1027 
1028     ShenandoahHeap* heap = ShenandoahHeap::heap();
1029     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1030     ShenandoahPushWorkerQueuesScope scope(_workers,
1031                                           traversal_gc->task_queues(),
1032                                           ergo_workers,
1033                                           /* do_check = */ false);
1034     uint nworkers = _workers->active_workers();
1035     traversal_gc->task_queues()->reserve(nworkers);
1036     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1037     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1038     _workers->run_task(&proc_task_proxy);
1039   }
1040 };
1041 
1042 void ShenandoahTraversalGC::weak_refs_work_doit() {
1043   ReferenceProcessor* rp = _heap->ref_processor();
1044 
1045   ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
1046 
1047   shenandoah_assert_rp_isalive_not_installed();
1048   ShenandoahForwardedIsAliveClosure is_alive;
1049   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
1050 
1051   WorkGang* workers = _heap->workers();
1052   uint nworkers = workers->active_workers();
1053 
1054   rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
1055   rp->set_active_mt_degree(nworkers);
1056 
1057   assert(task_queues()->is_empty(), "Should be empty");
1058 
1059   // complete_gc and keep_alive closures instantiated here are only needed for
1060   // single-threaded path in RP. They share the queue 0 for tracking work, which
1061   // simplifies implementation. Since RP may decide to call complete_gc several
1062   // times, we need to be able to reuse the terminator.
1063   uint serial_worker_id = 0;
1064   ShenandoahTaskTerminator terminator(1, task_queues());
1065   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
1066   ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
1067 
1068   ShenandoahTraversalRefProcTaskExecutor executor(workers);
1069 
1070   ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
1071   if (!_heap->is_degenerated_gc_in_progress()) {
1072     ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
1073     rp->process_discovered_references(&is_alive, &keep_alive,
1074                                       &complete_gc, &executor,
1075                                       &pt);
1076   } else {
1077     ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
1078     rp->process_discovered_references(&is_alive, &keep_alive,
1079                                       &complete_gc, &executor,
1080                                       &pt);
1081   }
1082 
1083   pt.print_all_references();
1084   assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
1085 }