1 /*
   2  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shared/weakProcessor.inline.hpp"
  32 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  33 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  41 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  43 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  44 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  45 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  46 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  47 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  48 #include "gc/shenandoah/shenandoahUtils.hpp"
  49 #include "gc/shenandoah/shenandoahVerifier.hpp"
  50 
  51 #include "memory/iterator.hpp"
  52 #include "memory/metaspace.hpp"
  53 #include "memory/resourceArea.hpp"
  54 
  55 /**
  56  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  57  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  58  * is incremental-update-based.
  59  *
  60  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  61  * several reasons:
  62  * - We will not reclaim them in this cycle anyway, because they are not in the
  63  *   cset
  64  * - It makes up for the bulk of work during final-pause
  65  * - It also shortens the concurrent cycle because we don't need to
  66  *   pointlessly traverse through newly allocated objects.
  67  * - As a nice side-effect, it solves the I-U termination problem (mutators
  68  *   cannot outrun the GC by allocating like crazy)
  69  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  71  *   achieves the same, but without extra barriers. I think the effect of
  72  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  73  *   particular, we will not see the head of a completely new long linked list
  74  *   in final-pause and end up traversing huge chunks of the heap there.
  75  * - We don't need to see/update the fields of new objects either, because they
  76  *   are either still null, or anything that's been stored into them has been
  77  *   evacuated+enqueued before (and will thus be treated later).
  78  *
  79  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  81  *
  82  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  84  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  85  *   them for cset. This means that we need to protect such regions from
  86  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  87  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  88  *   code.
  89  * - We *need* to traverse through evacuated objects. Those objects are
  90  *   pre-existing, and any references in them point to interesting objects that
  91  *   we need to see. We also want to count them as live, because we just
  92  *   determined that they are alive :-) I achieve this by upping TAMS
  93  *   concurrently for every gclab/gc-shared alloc before publishing the
  94  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  96  */
  97 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  98 private:
  99   ShenandoahObjToScanQueue* _queue;
 100   ShenandoahTraversalGC* _traversal_gc;
 101   ShenandoahHeap* const _heap;
 102 
 103 public:
 104   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 105     _queue(q),
 106     _heap(ShenandoahHeap::heap())
 107  { }
 108 
 109   void do_buffer(void** buffer, size_t size) {
 110     for (size_t i = 0; i < size; ++i) {
 111       oop* p = (oop*) &buffer[i];
 112       oop obj = RawAccess<>::oop_load(p);
 113       shenandoah_assert_not_forwarded(p, obj);
 114       if (_heap->marking_context()->mark(obj)) {
 115         _queue->push(ShenandoahMarkTask(obj));
 116       }
 117     }
 118   }
 119 };
 120 
 121 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 122 private:
 123   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 124 
 125 public:
 126   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 127     _satb_cl(satb_cl) {}
 128 
 129   void do_thread(Thread* thread) {
 130     if (thread->is_Java_thread()) {
 131       JavaThread* jt = (JavaThread*)thread;
 132       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 133     } else if (thread->is_VM_thread()) {
 134       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 135     }
 136   }
 137 };
 138 
 139 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 140 // and remark them later during final-traversal.
 141 class ShenandoahMarkCLDClosure : public CLDClosure {
 142 private:
 143   OopClosure* _cl;
 144 public:
 145   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 146   void do_cld(ClassLoaderData* cld) {
 147     cld->oops_do(_cl, true, true);
 148   }
 149 };
 150 
 151 // Like CLDToOopClosure, but only process modified CLDs
 152 class ShenandoahRemarkCLDClosure : public CLDClosure {
 153 private:
 154   OopClosure* _cl;
 155 public:
 156   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 157   void do_cld(ClassLoaderData* cld) {
 158     if (cld->has_modified_oops()) {
 159       cld->oops_do(_cl, true, true);
 160     }
 161   }
 162 };
 163 
// STW worker task for the init-traversal pause: scans GC roots with the
// traversal closure (which marks/evacuates/enqueues what it finds), seeding
// the per-worker task queues for the concurrent phase.
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahHeap* _heap;
  ShenandoahCsetCodeRootsIterator* _cset_coderoots;  // code roots that point into the cset
public:
  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()),
    _cset_coderoots(cset_coderoots) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    // Root scanning may evacuate; guard against evacuation OOM.
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      if (unload_classes) {
        // With class unloading, only strong roots are processed here; code
        // roots are restricted to those pointing into the collection set.
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, NULL, NULL, worker_id);
        // Need to pre-evac code roots here. Otherwise we might see from-space constants.
        ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
        ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        _cset_coderoots->possibly_parallel_blobs_do(&code_cl);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
      }
    }
  }
};
 207 
 208 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 209 private:
 210   ShenandoahTaskTerminator* _terminator;
 211   ShenandoahHeap* _heap;
 212 public:
 213   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
 214     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 215     _terminator(terminator),
 216     _heap(ShenandoahHeap::heap()) {}
 217 
 218   void work(uint worker_id) {
 219     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 220     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 221     ShenandoahEvacOOMScope oom_evac_scope;
 222     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 223 
 224     // Drain all outstanding work in queues.
 225     traversal_gc->main_loop(worker_id, _terminator, true);
 226   }
 227 };
 228 
// STW worker task for the final-traversal pause: drains leftover SATB buffers,
// rescans GC roots (code roots excluded, see note below), and finishes all
// queued traversal work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      // Thread scan also empties per-thread SATB buffers (see Step 0 note).
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Weak CLDs only need remarking if they were modified during the cycle.
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    } else {
      // Degenerated mode: identical structure, but with the degen closure flavor.
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 2: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
 304 
 305 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 306   _heap(heap),
 307   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 308   _traversal_set(ShenandoahHeapRegionSet()) {
 309 
 310   uint num_queues = heap->max_workers();
 311   for (uint i = 0; i < num_queues; ++i) {
 312     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 313     task_queue->initialize();
 314     _task_queues->register_queue(i, task_queue);
 315   }
 316 }
 317 
// Note: the queue set and queues allocated in the constructor are not
// deleted here; this object lives for the lifetime of the heap.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
 320 
// Set up per-region TAMS (top-at-mark-start) so that objects allocated during
// the traversal cycle are implicitly marked (see the file header notes).
void ShenandoahTraversalGC::prepare_regions() {
  size_t num_regions = _heap->num_regions();
  ShenandoahMarkingContext* const ctx = _heap->marking_context();
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = _heap->get_region(i);
    if (_heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        // Region participates in traversal: TAMS := current top, live data reset.
        ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        ctx->reset_top_at_mark_start(region);
      }
    } else {
      // FreeSet may contain uncommitted empty regions, once they are recommitted,
      // their TAMS may have old values, so reset them here.
      ctx->reset_top_at_mark_start(region);
    }
  }
}
 342 
// Prepare the heap for a traversal cycle (called under the heap lock at the
// init pause): make the heap parsable, choose the collection set, set up
// region TAMS, and rebuild the free set.
void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    // Make the heap walkable (the bool argument presumably retires TLABs —
    // confirm against ShenandoahHeap::make_parsable).
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  // Marking data must be pristine before the cycle starts.
  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
}
 373 
// Init-traversal pause: verify (optionally), prepare regions and cset, turn on
// traversal mode, set up reference discovery, then scan GC roots in parallel
// to seed the task queues for the concurrent phase.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    // Heap lock is needed for cset/free-set manipulation in prepare().
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    // Soft-ref policy depends on whether this cycle must clear all soft refs.
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      // Iterator over code roots pointing into the cset; these are pre-evacuated
      // by the init task (see ShenandoahInitTraversalCollectionTask).
      ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator();

      ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}
 429 
// Per-worker traversal driver. Selects the traversal closure flavor once,
// based on degen / class-unloading / string-dedup mode, then runs the typed
// main_loop_work so the hot loop carries no per-oop mode checks.
//   w         - worker id
//   t         - termination protocol shared by all workers
//   sts_yield - whether to yield to the suspendible thread set (true when
//               running concurrently, false inside a pause)
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    // 2x2x2 dispatch: degen x unload-classes x string-dedup.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  // Publish this worker's accumulated liveness data.
  _heap->flush_liveness_cache(w);
}
 482 
// Core traversal loop for one worker:
//   1) drain any unclaimed outstanding queues;
//   2) then repeatedly: drain completed SATB buffers, process local/stolen
//      tasks, and attempt termination when no work is found.
// Returns early when the GC has been cancelled. 'stride' bounds the number of
// tasks processed between cancellation/yield checks.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        // Current claimed queue drained; move on to the next one.
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Pick up oops recorded by concurrent mutator stores.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer the local queue; fall back to stealing from other workers.
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      // NOTE(review): leaving the evac-OOM scope and the suspendible thread
      // set while parked in termination presumably avoids blocking safepoints
      // and the OOM protocol — confirm against the scope/STS implementations.
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}
 548 
 549 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
 550   if (_heap->cancelled_gc()) {
 551     return true;
 552   }
 553   return false;
 554 }
 555 
// Concurrent traversal phase: all active workers drain and steal traversal
// work until global termination; afterwards, optionally preclean weak refs.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  // CLD claims from the init pause must be reset before re-walking CLDs.
  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}
 574 
// Final-traversal pause: finish all traversal work, process weak references,
// unload classes, then trash immediately-reclaimable regions and rebuild the
// free set. Each stage is skipped once the GC has been cancelled.
void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc() && _heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(false);
    // Unloading may leave roots referencing forwarded objects; fix them up.
    fixup_roots();
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // Protects alloc-regions: if top moved past TAMS, the region received
        // new (implicitly-marked) allocations and must not be trashed
        // (see the file header notes on alloc-regions).
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + ShenandoahBrooksPointer::word_size();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Also trash all continuation regions of this humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}
 672 
 673 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 674 private:
 675   template <class T>
 676   inline void do_oop_work(T* p) {
 677     T o = RawAccess<>::oop_load(p);
 678     if (!CompressedOops::is_null(o)) {
 679       oop obj = CompressedOops::decode_not_null(o);
 680       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 681       if (!oopDesc::equals_raw(obj, forw)) {
 682         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 683       }
 684     }
 685   }
 686 
 687 public:
 688   inline void do_oop(oop* p) { do_oop_work(p); }
 689   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 690 };
 691 
 692 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 693 private:
 694   ShenandoahRootProcessor* _rp;
 695 
 696 public:
 697   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
 698     AbstractGangTask("Shenandoah traversal fix roots"),
 699     _rp(rp) {}
 700 
 701   void work(uint worker_id) {
 702     ShenandoahParallelWorkerSession worker_session(worker_id);
 703     ShenandoahTraversalFixRootsClosure cl;
 704     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 705     CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
 706     _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
 707   }
 708 };
 709 
// Re-point all GC roots at to-space copies. Called from
// final_traversal_collection() after class unloading, which can leave roots
// still referencing forwarded objects.
void ShenandoahTraversalGC::fixup_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
 721 
// Drop all outstanding work from the traversal task queues.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
 725 
// Accessor for the per-worker traversal task queues.
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 729 
 730 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 731 private:
 732   ShenandoahHeap* const _heap;
 733 public:
 734   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 735   virtual bool should_return() { return _heap->cancelled_gc(); }
 736 };
 737 
// VoidClosure that drains the traversal queues single-threaded; presumably
// installed as the complete-GC closure during weak-ref precleaning (see
// preclean_weak_refs usage) — confirm against the caller.
class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    // Single-participant terminator: this closure runs on one thread only.
    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
    shenandoah_assert_rp_isalive_installed();
    traversal_gc->main_loop((uint) 0, &terminator, true);
  }
};
 749 
 750 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 751 private:
 752   ShenandoahObjToScanQueue* _queue;
 753   Thread* _thread;
 754   ShenandoahTraversalGC* _traversal_gc;
 755   ShenandoahMarkingContext* const _mark_context;
 756 
 757   template <class T>
 758   inline void do_oop_work(T* p) {
 759     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
 760   }
 761 
 762 public:
 763   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 764     _queue(q), _thread(Thread::current()),
 765     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 766     _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 767 
 768   void do_oop(narrowOop* p) { do_oop_work(p); }
 769   void do_oop(oop* p)       { do_oop_work(p); }
 770 };
 771 
 772 class ShenandoahTraversalWeakUpdateClosure : public OopClosure {
 773 private:
 774   template <class T>
 775   inline void do_oop_work(T* p) {
 776     // Cannot call maybe_update_with_forwarded, because on traversal-degen
 777     // path the collection set is already dropped. Instead, do the unguarded store.
 778     // TODO: This can be fixed after degen-traversal stops dropping cset.
 779     T o = RawAccess<>::oop_load(p);
 780     if (!CompressedOops::is_null(o)) {
 781       oop obj = CompressedOops::decode_not_null(o);
 782       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 783       shenandoah_assert_marked(p, obj);
 784       RawAccess<IS_NOT_NULL>::oop_store(p, obj);
 785     }
 786   }
 787 
 788 public:
 789   ShenandoahTraversalWeakUpdateClosure() {}
 790 
 791   void do_oop(narrowOop* p) { do_oop_work(p); }
 792   void do_oop(oop* p)       { do_oop_work(p); }
 793 };
 794 
 795 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 796 private:
 797   ShenandoahObjToScanQueue* _queue;
 798   Thread* _thread;
 799   ShenandoahTraversalGC* _traversal_gc;
 800   ShenandoahMarkingContext* const _mark_context;
 801 
 802   template <class T>
 803   inline void do_oop_work(T* p) {
 804     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
 805   }
 806 
 807 public:
 808   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 809           _queue(q), _thread(Thread::current()),
 810           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 811           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 812 
 813   void do_oop(narrowOop* p) { do_oop_work(p); }
 814   void do_oop(oop* p)       { do_oop_work(p); }
 815 };
 816 
 817 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
 818 private:
 819   ShenandoahObjToScanQueue* _queue;
 820   Thread* _thread;
 821   ShenandoahTraversalGC* _traversal_gc;
 822   ShenandoahMarkingContext* const _mark_context;
 823 
 824   template <class T>
 825   inline void do_oop_work(T* p) {
 826     ShenandoahEvacOOMScope evac_scope;
 827     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
 828   }
 829 
 830 public:
 831   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 832           _queue(q), _thread(Thread::current()),
 833           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 834           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 835 
 836   void do_oop(narrowOop* p) { do_oop_work(p); }
 837   void do_oop(oop* p)       { do_oop_work(p); }
 838 };
 839 
 840 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
 841 private:
 842   ShenandoahObjToScanQueue* _queue;
 843   Thread* _thread;
 844   ShenandoahTraversalGC* _traversal_gc;
 845   ShenandoahMarkingContext* const _mark_context;
 846 
 847   template <class T>
 848   inline void do_oop_work(T* p) {
 849     ShenandoahEvacOOMScope evac_scope;
 850     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
 851   }
 852 
 853 public:
 854   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 855           _queue(q), _thread(Thread::current()),
 856           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 857           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 858 
 859   void do_oop(narrowOop* p) { do_oop_work(p); }
 860   void do_oop(oop* p)       { do_oop_work(p); }
 861 };
 862 
// Gang task that runs reference precleaning inside a (single) GC worker
// thread, so it gets worker-local facilities (GCLABs etc., see the comment
// in preclean_weak_refs). The scope objects below are order-sensitive.
class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);
    // Join the suspendible thread set: precleaning runs concurrently and must
    // be able to yield to safepoints.
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    // Evac-OOM protocol scope for the work done via the closures below.
    ShenandoahEvacOOMScope oom_evac_scope;

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    // Worker 0's queue backs both the keep-alive and complete-gc closures.
    ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);

    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahTraversalCancelledGCYieldClosure yield;
    ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};
 892 
// Concurrently precleans discovered references whose referents are already
// alive, to shrink the reference-processing work left for the pause.
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning below is single-threaded; scope-disable MT discovery in RP.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Scope-install our is-alive closure into RP for the duration of this phase.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues should be drained unless the GC was cancelled mid-preclean.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 933 
 934 // Weak Reference Closures
 935 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 936   uint _worker_id;
 937   ShenandoahTaskTerminator* _terminator;
 938   bool _reset_terminator;
 939 
 940 public:
 941   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 942     _worker_id(worker_id),
 943     _terminator(t),
 944     _reset_terminator(reset_terminator) {
 945   }
 946 
 947   void do_void() {
 948     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 949 
 950     ShenandoahHeap* sh = ShenandoahHeap::heap();
 951     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 952     assert(sh->process_references(), "why else would we be here?");
 953     shenandoah_assert_rp_isalive_installed();
 954 
 955     traversal_gc->main_loop(_worker_id, _terminator, false);
 956 
 957     if (_reset_terminator) {
 958       _terminator->reset_for_reuse();
 959     }
 960   }
 961 };
 962 
 963 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
 964   uint _worker_id;
 965   ShenandoahTaskTerminator* _terminator;
 966   bool _reset_terminator;
 967 
 968 public:
 969   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 970           _worker_id(worker_id),
 971           _terminator(t),
 972           _reset_terminator(reset_terminator) {
 973   }
 974 
 975   void do_void() {
 976     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 977 
 978     ShenandoahHeap* sh = ShenandoahHeap::heap();
 979     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 980     assert(sh->process_references(), "why else would we be here?");
 981     shenandoah_assert_rp_isalive_installed();
 982 
 983     ShenandoahEvacOOMScope evac_scope;
 984     traversal_gc->main_loop(_worker_id, _terminator, false);
 985 
 986     if (_reset_terminator) {
 987       _terminator->reset_for_reuse();
 988     }
 989   }
 990 };
 991 
// Entry point for weak reference processing; the drain closures used by
// the doit path assert they run at a Shenandoah safepoint.
void ShenandoahTraversalGC::weak_refs_work() {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we will miss marking JNI Weak refs then, see implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit();

  // RP must come out of this phase with all discovered lists emptied and
  // discovery turned off.
  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");

}
1010 
1011 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
1012 private:
1013   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1014   ShenandoahTaskTerminator* _terminator;
1015 
1016 public:
1017   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1018                                       ShenandoahTaskTerminator* t) :
1019     AbstractGangTask("Process reference objects in parallel"),
1020     _proc_task(proc_task),
1021     _terminator(t) {
1022   }
1023 
1024   void work(uint worker_id) {
1025     ShenandoahEvacOOMScope oom_evac_scope;
1026     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1027     ShenandoahHeap* heap = ShenandoahHeap::heap();
1028     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1029 
1030     ShenandoahForwardedIsAliveClosure is_alive;
1031     if (!heap->is_degenerated_gc_in_progress()) {
1032       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1033       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1034     } else {
1035       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1036       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1037     }
1038   }
1039 };
1040 
// Executor handed to the ReferenceProcessor so that its parallel phases run
// on the Shenandoah worker gang with traversal queues attached.
class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    // Scope must cover run_task: it sizes the gang to ergo_workers and
    // attaches the traversal queues for its duration.
    ShenandoahPushWorkerQueuesScope scope(_workers,
                                          traversal_gc->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    traversal_gc->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
    ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};
1065 
// Core of weak reference processing: configures RP, processes discovered
// references (parallel via the executor, serial via the closures built on
// queue 0), then updates leftover weak oops.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  // Scope-install our is-alive closure into RP for the duration of this phase.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
  ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);

  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
  // Degenerated cycles use the *Degen keep-alive variants (see the closure
  // definitions above for the difference).
  if (!_heap->is_degenerated_gc_in_progress()) {
    ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  } else {
    ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  }

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination);

    // Process leftover weak oops (using parallel version)
    ShenandoahTraversalWeakUpdateClosure cl;
    WeakProcessor::weak_oops_do(workers, &is_alive, &cl, 1);

    pt.print_all_references();

    // Queues should be drained unless the GC was cancelled.
    assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
  }
}