1 /*
   2  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/referenceProcessor.hpp"
  27 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  28 #include "gc/shared/workgroup.hpp"
  29 #include "gc/shared/weakProcessor.hpp"
  30 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  31 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  32 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  33 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  36 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  37 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  38 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  39 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  40 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  43 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  44 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  45 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  46 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  47 #include "gc/shenandoah/shenandoahUtils.hpp"
  48 #include "gc/shenandoah/shenandoahVerifier.hpp"
  49 
  50 #include "memory/iterator.hpp"
  51 #include "memory/metaspace.hpp"
  52 #include "memory/resourceArea.hpp"
  53 
  54 /**
  55  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  56  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  57  * is incremental-update-based.
  58  *
  59  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  60  * several reasons:
  61  * - We will not reclaim them in this cycle anyway, because they are not in the
  62  *   cset
  63  * - It makes up for the bulk of work during final-pause
  64  * - It also shortens the concurrent cycle because we don't need to
  65  *   pointlessly traverse through newly allocated objects.
  66  * - As a nice side-effect, it solves the I-U termination problem (mutators
  67  *   cannot outrun the GC by allocating like crazy)
  68  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  70  *   achieves the same, but without extra barriers. I think the effect of
  71  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  72  *   particular, we will not see the head of a completely new long linked list
  73  *   in final-pause and end up traversing huge chunks of the heap there.
  74  * - We don't need to see/update the fields of new objects either, because they
  75  *   are either still null, or anything that's been stored into them has been
  76  *   evacuated+enqueued before (and will thus be treated later).
  77  *
  78  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  80  *
  81  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  83  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  84  *   them for cset. This means that we need to protect such regions from
  85  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  86  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  87  *   code.
  88  * - We *need* to traverse through evacuated objects. Those objects are
  89  *   pre-existing, and any references in them point to interesting objects that
  90  *   we need to see. We also want to count them as live, because we just
  91  *   determined that they are alive :-) I achieve this by upping TAMS
  92  *   concurrently for every gclab/gc-shared alloc before publishing the
  93  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  95  */
  96 class ShenandoahTraversalSATBBufferClosure : public ShenandoahSATBBufferClosure {
  97 private:
  98   ShenandoahObjToScanQueue* _queue;
  99   ShenandoahTraversalGC* _traversal_gc;
 100   ShenandoahHeap* const _heap;
 101 
 102 public:
 103   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 104     _queue(q),
 105     _heap(ShenandoahHeap::heap())
 106  { }
 107 
 108   void do_buffer(void** buffer, size_t size) {
 109     for (size_t i = 0; i < size; ++i) {
 110       oop* p = (oop*) &buffer[i];
 111       oop obj = RawAccess<>::oop_load(p);
 112       shenandoah_assert_not_forwarded(p, obj);
 113       if (_heap->marking_context()->mark(obj)) {
 114         _queue->push(ShenandoahMarkTask(obj));
 115       }
 116     }
 117   }
 118 };
 119 
 120 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 121 private:
 122   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 123 
 124 public:
 125   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 126     _satb_cl(satb_cl) {}
 127 
 128   void do_thread(Thread* thread) {
 129     if (thread->is_Java_thread()) {
 130       JavaThread* jt = (JavaThread*)thread;
 131       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 132     } else if (thread->is_VM_thread()) {
 133       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 134     }
 135   }
 136 };
 137 
 138 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 139 // and remark them later during final-traversal.
 140 class ShenandoahMarkCLDClosure : public CLDClosure {
 141 private:
 142   OopClosure* _cl;
 143 public:
 144   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 145   void do_cld(ClassLoaderData* cld) {
 146     cld->oops_do(_cl, true, true);
 147   }
 148 };
 149 
 150 // Like CLDToOopClosure, but only process modified CLDs
 151 class ShenandoahRemarkCLDClosure : public CLDClosure {
 152 private:
 153   OopClosure* _cl;
 154 public:
 155   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 156   void do_cld(ClassLoaderData* cld) {
 157     if (cld->has_modified_oops()) {
 158       cld->oops_do(_cl, true, true);
 159     }
 160   }
 161 };
 162 
 163 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
 164 private:
 165   ShenandoahCSetRootScanner* _rp;
 166   ShenandoahHeap* _heap;
 167   ShenandoahCsetCodeRootsIterator* _cset_coderoots;
 168   ShenandoahStringDedupRoots       _dedup_roots;
 169 
 170 public:
 171   ShenandoahInitTraversalCollectionTask(ShenandoahCSetRootScanner* rp) :
 172     AbstractGangTask("Shenandoah Init Traversal Collection"),
 173     _rp(rp),
 174     _heap(ShenandoahHeap::heap()) {}
 175 
 176   void work(uint worker_id) {
 177     ShenandoahParallelWorkerSession worker_session(worker_id);
 178 
 179     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
 180     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 181 
 182     bool process_refs = _heap->process_references();
 183     bool unload_classes = _heap->unload_classes();
 184     ReferenceProcessor* rp = NULL;
 185     if (process_refs) {
 186       rp = _heap->ref_processor();
 187     }
 188 
 189     // Step 1: Process ordinary GC roots.
 190     {
 191       ShenandoahTraversalRootsClosure roots_cl(q, rp);
 192       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
 193       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 194       if (unload_classes) {
 195         _rp->roots_do(worker_id, &roots_cl, NULL, &code_cl);
 196       } else {
 197         _rp->roots_do(worker_id, &roots_cl, &cld_cl, &code_cl);
 198       }
 199 
 200       AlwaysTrueClosure is_alive;
 201       _dedup_roots.oops_do(&is_alive, &roots_cl, worker_id);
 202     }
 203   }
 204 };
 205 
 206 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 207 private:
 208   ShenandoahTaskTerminator* _terminator;
 209   ShenandoahHeap* _heap;
 210 public:
 211   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
 212     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 213     _terminator(terminator),
 214     _heap(ShenandoahHeap::heap()) {}
 215 
 216   void work(uint worker_id) {
 217     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 218     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 219     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 220 
 221     // Drain all outstanding work in queues.
 222     traversal_gc->main_loop(worker_id, _terminator, true);
 223   }
 224 };
 225 
// Task for the final-traversal pause: drain leftover completed SATB buffers,
// rescan GC roots (draining remaining per-thread SATB buffers piggy-backed on
// the thread scan), then finish all queued traversal work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahAllRootScanner* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    // The degenerated branch uses the non-atomic "Degen" variant of the roots closure.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalRootsClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Only CLDs with modified oops need remarking (see ShenandoahRemarkCLDClosure).
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    } else {
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 2: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
 300 
 301 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 302   _heap(heap),
 303   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 304   _traversal_set(ShenandoahHeapRegionSet()) {
 305 
 306   // Traversal does not support concurrent code root scanning
 307   FLAG_SET_DEFAULT(ShenandoahConcurrentScanCodeRoots, false);
 308 
 309   uint num_queues = heap->max_workers();
 310   for (uint i = 0; i < num_queues; ++i) {
 311     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 312     task_queue->initialize();
 313     _task_queues->register_queue(i, task_queue);
 314   }
 315 }
 316 
// Empty: the task queues allocated in the constructor are not freed here.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
 319 
 320 void ShenandoahTraversalGC::prepare_regions() {
 321   size_t num_regions = _heap->num_regions();
 322   ShenandoahMarkingContext* const ctx = _heap->marking_context();
 323   for (size_t i = 0; i < num_regions; i++) {
 324     ShenandoahHeapRegion* region = _heap->get_region(i);
 325     if (_heap->is_bitmap_slice_committed(region)) {
 326       if (_traversal_set.is_in(i)) {
 327         ctx->capture_top_at_mark_start(region);
 328         region->clear_live_data();
 329         assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
 330       } else {
 331         // Everything outside the traversal set is always considered live.
 332         ctx->reset_top_at_mark_start(region);
 333       }
 334     } else {
 335       // FreeSet may contain uncommitted empty regions, once they are recommitted,
 336       // their TAMS may have old values, so reset them here.
 337       ctx->reset_top_at_mark_start(region);
 338     }
 339   }
 340 }
 341 
 342 void ShenandoahTraversalGC::prepare() {
 343   if (UseTLAB) {
 344     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_accumulate_stats);
 345     _heap->accumulate_statistics_tlabs();
 346   }
 347 
 348   {
 349     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
 350     _heap->make_parsable(true);
 351   }
 352 
 353   if (UseTLAB) {
 354     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
 355     _heap->resize_tlabs();
 356   }
 357 
 358   assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
 359   assert(!_heap->marking_context()->is_complete(), "should not be complete");
 360 
 361   // About to choose the collection set, make sure we know which regions are pinned.
 362   {
 363     ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_prepare_sync_pinned);
 364     _heap->sync_pinned_region_status();
 365   }
 366 
 367   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 368   {
 369     ShenandoahHeapLocker lock(_heap->lock());
 370 
 371     collection_set->clear();
 372     assert(collection_set->count() == 0, "collection set not clear");
 373 
 374     // Find collection set
 375     _heap->heuristics()->choose_collection_set(collection_set);
 376     prepare_regions();
 377 
 378     // Rebuild free set
 379     _heap->free_set()->rebuild();
 380   }
 381 
 382   log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s, " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions",
 383                      byte_size_in_proper_unit(collection_set->garbage()),   proper_unit_for_byte_size(collection_set->garbage()),
 384                      byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()),
 385                      collection_set->count());
 386 }
 387 
// Init-traversal pause (runs at a Shenandoah safepoint): prepare the heap,
// enable reference discovery, and seed the traversal queues from GC roots.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  // Choose cset, set up TAMS, rebuild free set (see prepare()).
  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    // Apply the current soft-ref clearing policy for this cycle.
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    // Scan roots with all active workers, seeding the per-worker queues.
    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahCSetRootScanner rp(nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
      ShenandoahInitTraversalCollectionTask traversal_task(&rp);
      _heap->workers()->run_task(&traversal_task);
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}
 439 
// Worker entry point for queue draining. Selects the traversal closure
// variant along three axes -- degenerated vs. concurrent, class unloading
// (Metadata) vs. not, and string dedup vs. not -- then runs the templated
// work loop with the chosen closure type.
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      // Same matrix, but with the "Degen" closure variants.
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  // Flush this worker's liveness cache.
  _heap->flush_liveness_cache(w);
}
 492 
// Templated work loop shared by all closure variants:
//   1) Help drain any outstanding (claimable) queues left over from earlier
//      phases.
//   2) Then loop: drain completed SATB buffers, pop/steal tasks in strides,
//      and offer termination when no work was found.
// Returns early whenever GC cancellation is observed.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Cancellation/yield is checked once per stride, not per task.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        // Current queue exhausted; claim the next outstanding one.
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  // Seed passed to queues->steal() for victim selection.
  int seed = 17;

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Pick up any SATB buffers mutators completed since the last check.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer own queue; fall back to stealing from other workers.
      if (q->pop(task) ||
          queues->steal(worker_id, &seed, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      // If termination is not granted, loop again: more work may have appeared.
      if (terminator->offer_termination(&tt)) return;
    }
  }
}
 559 
 560 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
 561   if (_heap->cancelled_gc()) {
 562     return true;
 563   }
 564   return false;
 565 }
 566 
 567 void ShenandoahTraversalGC::concurrent_traversal_collection() {
 568   ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
 569   if (!_heap->cancelled_gc()) {
 570     uint nworkers = _heap->workers()->active_workers();
 571     task_queues()->reserve(nworkers);
 572     ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);
 573 
 574     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 575     ShenandoahConcurrentTraversalCollectionTask task(&terminator);
 576     _heap->workers()->run_task(&task);
 577   }
 578 
 579   if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
 580     preclean_weak_refs();
 581   }
 582 }
 583 
// Final-traversal pause: finish the traversal, process weak references,
// fix up roots, unload classes / prune the StringTable, and trash the
// now-dead traversal-set regions.
void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahAllRootScanner rp(nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    fixup_roots();
    if (_heap->unload_classes()) {
      _heap->unload_classes_and_cleanup_tables(false);
    } else {
      // Without class unloading, still prune dead entries from the StringTable.
      ShenandoahIsAliveSelector alive;
      StringTable::unlink(alive.is_alive_closure());
    }

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Need to see that pinned region status is updated: newly pinned regions must not
    // be trashed. New unpinned regions should be trashed.
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_sync_pinned);
      _heap->sync_pinned_region_status();
    }

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // Regions with allocations past TAMS (top != TAMS) are protected from
        // trashing: those objects are implicitly live but not counted (see
        // the alloc-region note in the file header).
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Also trash all continuation regions of this humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}
 691 
 692 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 693 private:
 694   template <class T>
 695   inline void do_oop_work(T* p) {
 696     T o = RawAccess<>::oop_load(p);
 697     if (!CompressedOops::is_null(o)) {
 698       oop obj = CompressedOops::decode_not_null(o);
 699       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 700       if (obj != forw) {
 701         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 702       }
 703     }
 704   }
 705 
 706 public:
 707   inline void do_oop(oop* p) { do_oop_work(p); }
 708   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 709 };
 710 
 711 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 712 private:
 713   ShenandoahRootUpdater* _rp;
 714 
 715 public:
 716   ShenandoahTraversalFixRootsTask(ShenandoahRootUpdater* rp) :
 717     AbstractGangTask("Shenandoah traversal fix roots"),
 718     _rp(rp) {
 719     assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
 720   }
 721 
 722   void work(uint worker_id) {
 723     ShenandoahParallelWorkerSession worker_session(worker_id);
 724     ShenandoahTraversalFixRootsClosure cl;
 725     ShenandoahForwardedIsAliveClosure is_alive;
 726     _rp->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalFixRootsClosure>(worker_id, &is_alive, &cl);
 727   }
 728 };
 729 
 730 void ShenandoahTraversalGC::fixup_roots() {
 731 #if COMPILER2_OR_JVMCI
 732   DerivedPointerTable::clear();
 733 #endif
 734   ShenandoahRootUpdater rp(_heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots, true /* update code cache */);
 735   ShenandoahTraversalFixRootsTask update_roots_task(&rp);
 736   _heap->workers()->run_task(&update_roots_task);
 737 #if COMPILER2_OR_JVMCI
 738   DerivedPointerTable::update_pointers();
 739 #endif
 740 }
 741 
// Drop any leftover contents from the traversal task queues.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
 745 
// Accessor for the traversal task queue set.
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 749 
 750 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 751 private:
 752   ShenandoahHeap* const _heap;
 753 public:
 754   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 755   virtual bool should_return() { return _heap->cancelled_gc(); }
 756 };
 757 
 758 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 759 public:
 760   void do_void() {
 761     ShenandoahHeap* sh = ShenandoahHeap::heap();
 762     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 763     assert(sh->process_references(), "why else would we be here?");
 764     ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
 765     shenandoah_assert_rp_isalive_installed();
 766     traversal_gc->main_loop((uint) 0, &terminator, true);
 767   }
 768 };
 769 
// Keep-alive closure for reference processing: each oop goes through
// process_oop with atomic updates enabled, string dedup and degen disabled.
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 791 
// Degenerated-cycle variant of ShenandoahTraversalKeepAliveUpdateClosure.
// Differs only in the process_oop template flags: degen = true and
// atomic update = false (plain stores instead of atomic slot updates).
class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
  }

public:
  // q: this worker's scan queue; newly kept-alive objects are pushed there.
  ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 813 
// Keep-alive closure for the single-threaded (serial) reference processing
// path; see weak_refs_work_doit where it is bound to queue 0. Uses the same
// process_oop flags as the MT closure: non-degen, atomic slot updates.
class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
  }

public:
  // q: the serial worker's scan queue; newly kept-alive objects are pushed there.
  ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 835 
// Single-threaded, degenerated-cycle variant of the keep-alive closure.
// process_oop flags: degen = true, atomic update = false (plain stores).
class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
  }

public:
  // q: the serial worker's scan queue; newly kept-alive objects are pushed there.
  ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
          _queue(q), _thread(Thread::current()),
          _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
          _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 857 
 858 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
 859 private:
 860   ReferenceProcessor* _rp;
 861 
 862 public:
 863   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
 864           AbstractGangTask("Precleaning task"),
 865           _rp(rp) {}
 866 
 867   void work(uint worker_id) {
 868     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
 869     ShenandoahParallelWorkerSession worker_session(worker_id);
 870     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 871 
 872     ShenandoahHeap* sh = ShenandoahHeap::heap();
 873 
 874     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
 875 
 876     ShenandoahForwardedIsAliveClosure is_alive;
 877     ShenandoahTraversalCancelledGCYieldClosure yield;
 878     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 879     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
 880     ResourceMark rm;
 881     _rp->preclean_discovered_references(&is_alive, &keep_alive,
 882                                         &complete_gc, &yield,
 883                                         NULL);
 884   }
 885 };
 886 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning runs single-threaded below; scoped mutator turns MT discovery
  // off for the duration and restores it on exit.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Scoped mutator installs the traversal is-alive closure into RP and
  // removes it again when this function returns.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues must be drained, unless the GC was cancelled mid-preclean.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 927 
 928 // Weak Reference Closures
 929 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 930   uint _worker_id;
 931   ShenandoahTaskTerminator* _terminator;
 932   bool _reset_terminator;
 933 
 934 public:
 935   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 936     _worker_id(worker_id),
 937     _terminator(t),
 938     _reset_terminator(reset_terminator) {
 939   }
 940 
 941   void do_void() {
 942     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 943 
 944     ShenandoahHeap* sh = ShenandoahHeap::heap();
 945     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 946     assert(sh->process_references(), "why else would we be here?");
 947     shenandoah_assert_rp_isalive_installed();
 948 
 949     traversal_gc->main_loop(_worker_id, _terminator, false);
 950 
 951     if (_reset_terminator) {
 952       _terminator->reset_for_reuse();
 953     }
 954   }
 955 };
 956 
 957 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
 958   uint _worker_id;
 959   ShenandoahTaskTerminator* _terminator;
 960   bool _reset_terminator;
 961 
 962 public:
 963   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 964           _worker_id(worker_id),
 965           _terminator(t),
 966           _reset_terminator(reset_terminator) {
 967   }
 968 
 969   void do_void() {
 970     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 971 
 972     ShenandoahHeap* sh = ShenandoahHeap::heap();
 973     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 974     assert(sh->process_references(), "why else would we be here?");
 975     shenandoah_assert_rp_isalive_installed();
 976 
 977     traversal_gc->main_loop(_worker_id, _terminator, false);
 978 
 979     if (_reset_terminator) {
 980       _terminator->reset_for_reuse();
 981     }
 982   }
 983 };
 984 
 985 void ShenandoahTraversalGC::weak_refs_work() {
 986   assert(_heap->process_references(), "sanity");
 987 
 988   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
 989 
 990   ShenandoahGCPhase phase(phase_root);
 991 
 992   ReferenceProcessor* rp = _heap->ref_processor();
 993 
 994   // NOTE: We cannot shortcut on has_discovered_references() here, because
 995   // we will miss marking JNI Weak refs then, see implementation in
 996   // ReferenceProcessor::process_discovered_references.
 997   weak_refs_work_doit();
 998 
 999   rp->verify_no_references_recorded();
1000   assert(!rp->discovery_enabled(), "Post condition");
1001 
1002 }
1003 
1004 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
1005 private:
1006   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1007   ShenandoahTaskTerminator* _terminator;
1008 
1009 public:
1010   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1011                                       ShenandoahTaskTerminator* t) :
1012     AbstractGangTask("Process reference objects in parallel"),
1013     _proc_task(proc_task),
1014     _terminator(t) {
1015   }
1016 
1017   void work(uint worker_id) {
1018     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1019     ShenandoahHeap* heap = ShenandoahHeap::heap();
1020     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1021 
1022     ShenandoahForwardedIsAliveClosure is_alive;
1023     if (!heap->is_degenerated_gc_in_progress()) {
1024       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1025       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1026     } else {
1027       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1028       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1029     }
1030   }
1031 };
1032 
1033 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1034 private:
1035   WorkGang* _workers;
1036 
1037 public:
1038   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
1039 
1040   // Executes a task using worker threads.
1041   void execute(ProcessTask& task, uint ergo_workers) {
1042     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1043 
1044     ShenandoahHeap* heap = ShenandoahHeap::heap();
1045     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1046     ShenandoahPushWorkerQueuesScope scope(_workers,
1047                                           traversal_gc->task_queues(),
1048                                           ergo_workers,
1049                                           /* do_check = */ false);
1050     uint nworkers = _workers->active_workers();
1051     traversal_gc->task_queues()->reserve(nworkers);
1052     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1053     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1054     _workers->run_task(&proc_task_proxy);
1055   }
1056 };
1057 
// Performs the actual reference processing: serial path via the closures
// built here on queue 0, MT path via ShenandoahTraversalRefProcTaskExecutor.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE(review): phase_process is never used in this function as written —
  // it looks like it was meant to scope a ShenandoahGCPhase around the
  // process_discovered_references calls; confirm against upstream.
  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  // Scoped mutator installs the traversal is-alive closure into RP and
  // removes it again when this function returns.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
  ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);

  // The MT path goes through this executor, which pushes the queues to workers.
  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
  if (!_heap->is_degenerated_gc_in_progress()) {
    ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  } else {
    // Degenerated cycle: use the non-atomic-update variant of the keep-alive closure.
    ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  }

  pt.print_all_references();
  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
}