1 /*
   2  * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shared/weakProcessor.hpp"
  32 #include "gc/shared/weakProcessor.inline.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  39 #include "gc/shenandoah/shenandoahHeap.hpp"
  40 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  41 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  45 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  46 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  47 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  48 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  49 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  50 #include "gc/shenandoah/shenandoahTimingTracker.hpp"
  51 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  52 #include "gc/shenandoah/shenandoahUtils.hpp"
  53 #include "gc/shenandoah/shenandoahVerifier.hpp"
  54 
  55 #include "memory/iterator.hpp"
  56 #include "memory/resourceArea.hpp"
  57 
  58 /**
  59  * NOTE: We use the SATB buffers from thread.hpp and satbMarkQueue.hpp; however, this is not an SATB algorithm.
  60  * We use the buffer as a generic oop buffer to enqueue new values from concurrent oop stores, i.e. the algorithm
  61  * is incremental-update-based.
  62  *
  63  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  64  * several reasons:
  65  * - We will not reclaim them in this cycle anyway, because they are not in the
  66  *   cset
  67  * - Traversing them would make up the bulk of the work during the final pause
  68  * - It also shortens the concurrent cycle because we don't need to
  69  *   pointlessly traverse through newly allocated objects.
  70  * - As a nice side-effect, it solves the I-U termination problem (mutators
  71  *   cannot outrun the GC by allocating like crazy)
  72  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
  73  *   target object of stores if it's new. Treating new objects as implicitly live
  74  *   achieves the same, but without extra barriers. I think the effect of a
  75  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  76  *   particular, we will not see the head of a completely new long linked list
  77  *   in final-pause and end up traversing huge chunks of the heap there.
  78  * - We don't need to see/update the fields of new objects either, because they
  79  *   are either still null, or anything that's been stored into them has been
  80  *   evacuated+enqueued before (and will thus be treated later).
  81  *
  82  * We achieve this by setting TAMS for each region, and everything allocated
  83  * beyond TAMS will be 'implicitly marked'.
  84  *
  85  * Gotchas:
  86  * - While we want new objects to be implicitly marked, we don't want to count
  87  *   them as live. Otherwise the next cycle wouldn't pick them up and consider
  88  *   them for the cset. This means that we need to protect such regions from
  89  *   getting accidentally trashed at the end of the traversal cycle. This is why I
  90  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  91  *   code.
  92  * - We *need* to traverse through evacuated objects. Those objects are
  93  *   pre-existing, and any references in them point to interesting objects that
  94  *   we need to see. We also want to count them as live, because we just
  95  *   determined that they are alive :-) I achieve this by upping TAMS
  96  *   concurrently for every gclab/gc-shared alloc before publishing the
  97  *   evacuated object. This way, the GC threads will not consider such objects
  98  *   implicitly marked, and will traverse through them as normal.
  99  */
 100 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
 101 private:
 102   ShenandoahObjToScanQueue* _queue;
 104   ShenandoahHeap* const _heap;
 105 
 106 public:
 107   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 108     _queue(q),
 109     _heap(ShenandoahHeap::heap())
 110   { }
 111 
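  // Each buffer entry is an oop enqueued by the incremental-update write barrier.
  // Mark it in the marking context (the first marker wins) and push newly marked
  // objects onto this worker's traversal queue.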
 112   void do_buffer(void** buffer, size_t size) {
 113     for (size_t i = 0; i < size; ++i) {
 114       oop* p = (oop*) &buffer[i];
 115       oop obj = RawAccess<>::oop_load(p);
 116       shenandoah_assert_not_forwarded(p, obj);
 117       if (_heap->marking_context()->mark(obj)) {
 118         _queue->push(ShenandoahMarkTask(obj));
 119       }
 120     }
 121   }
 122 };
 123 
 124 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 125 private:
 126   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 127 
 128 public:
 129   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 130     _satb_cl(satb_cl) {}
 131 
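  // Java threads drain their own SATB queues; the shared queue (used by non-Java
  // threads) is drained when we visit the VM thread.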
 132   void do_thread(Thread* thread) {
 133     if (thread->is_Java_thread()) {
 134       JavaThread* jt = (JavaThread*)thread;
 135       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 136     } else if (thread->is_VM_thread()) {
 137       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 138     }
 139   }
 140 };
 141 
 142 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 143 // and remark them later during final-traversal.
 144 class ShenandoahMarkCLDClosure : public CLDClosure {
 145 private:
 146   OopClosure* _cl;
 147 public:
 148   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 149   void do_cld(ClassLoaderData* cld) {
 150     cld->oops_do(_cl, true, true);
 151   }
 152 };
 153 
 154 // Like CLDToOopClosure, but only process modified CLDs
 155 class ShenandoahRemarkCLDClosure : public CLDClosure {
 156 private:
 157   OopClosure* _cl;
 158 public:
 159   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 160   void do_cld(ClassLoaderData* cld) {
 161     if (cld->has_modified_oops()) {
 162       cld->oops_do(_cl, true, true);
 163     }
 164   }
 165 };
 166 
 167 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
 168 private:
 169   ShenandoahRootProcessor* _rp;
 170   ShenandoahHeap* _heap;
 171   ShenandoahCsetCodeRootsIterator* _cset_coderoots;
 172 public:
 173   ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) :
 174     AbstractGangTask("Shenandoah Init Traversal Collection"),
 175     _rp(rp),
 176     _heap(ShenandoahHeap::heap()),
 177     _cset_coderoots(cset_coderoots) {}
 178 
 179   void work(uint worker_id) {
 180     ShenandoahParallelWorkerSession worker_session(worker_id);
 181 
 182     ShenandoahEvacOOMScope oom_evac_scope;
 183     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
 184     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 185 
 186     bool process_refs = _heap->process_references();
 187     bool unload_classes = _heap->unload_classes();
 188     ReferenceProcessor* rp = NULL;
 189     if (process_refs) {
 190       rp = _heap->ref_processor();
 191     }
 192 
 193     // Step 1: Process ordinary GC roots.
 194     {
 195       ShenandoahTraversalClosure roots_cl(q, rp);
 196       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
 197       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
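      // Weak roots: when reference processing is enabled, pass NULL and leave them to
      // the reference processor; otherwise scan them with the same traversal closure.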
 198       if (unload_classes) {
 199         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, NULL, NULL, worker_id);
 200         // Need to pre-evac code roots here. Otherwise we might see from-space constants.
 201         ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
 202         ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
 203         _cset_coderoots->possibly_parallel_blobs_do(&code_cl);
 204       } else {
 205         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
 206       }
 207     }
 208   }
 209 };
 210 
 211 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 212 private:
 213   ShenandoahTaskTerminator* _terminator;
 214   ShenandoahHeap* _heap;
 215 public:
 216   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
 217     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 218     _terminator(terminator),
 219     _heap(ShenandoahHeap::heap()) {}
 220 
 221   void work(uint worker_id) {
 222     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 223     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 224     ShenandoahEvacOOMScope oom_evac_scope;
 225     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 226 
 227     // Drain all outstanding work in queues.
 228     traversal_gc->main_loop(worker_id, _terminator, true);
 229   }
 230 };
 231 
 232 class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
 233 private:
 234   ShenandoahRootProcessor* _rp;
 235   ShenandoahTaskTerminator* _terminator;
 236   ShenandoahHeap* _heap;
 237 public:
 238   ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
 239     AbstractGangTask("Shenandoah Final Traversal Collection"),
 240     _rp(rp),
 241     _terminator(terminator),
 242     _heap(ShenandoahHeap::heap()) {}
 243 
 244   void work(uint worker_id) {
 245     ShenandoahParallelWorkerSession worker_session(worker_id);
 246 
 247     ShenandoahEvacOOMScope oom_evac_scope;
 248     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 249 
 250     ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
 251     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 252 
 253     bool process_refs = _heap->process_references();
 254     bool unload_classes = _heap->unload_classes();
 255     ReferenceProcessor* rp = NULL;
 256     if (process_refs) {
 257       rp = _heap->ref_processor();
 258     }
 259 
 260     // Step 0: Drain outstanding SATB queues.
 261     // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
 262     ShenandoahTraversalSATBBufferClosure satb_cl(q);
 263     {
 264       // Process remaining finished SATB buffers.
 265       SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
 266       while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
 267       // Process remaining threads' SATB buffers below.
 268     }
 269 
 270     // Step 1: Process GC roots.
 271     // Oops in code roots were marked, evacuated, and enqueued for further traversal,
 272     // and the references to them were updated during the init pause. New nmethods are handled
 273     // in a similar way during the nmethod-register process. Therefore, we don't need to rescan
 274     // code roots here.
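    // The degenerated (STW) path below mirrors the concurrent path, but uses the
    // *Degen closure variants.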
 275     if (!_heap->is_degenerated_gc_in_progress()) {
 276       ShenandoahTraversalClosure roots_cl(q, rp);
 277       CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
 278       ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
 279       if (unload_classes) {
 280         ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
 281         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, &tc, worker_id);
 282       } else {
 283         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &tc, worker_id);
 284       }
 285     } else {
 286       ShenandoahTraversalDegenClosure roots_cl(q, rp);
 287       CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
 288       ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
 289       if (unload_classes) {
 290         ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
 291         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, &tc, worker_id);
 292       } else {
 293         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &tc, worker_id);
 294       }
 295     }
 296 
 297     {
 298       ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
 299       ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);
 300 
 301       // Step 2: Finally, drain all outstanding work in the queues.
 302       traversal_gc->main_loop(worker_id, _terminator, false);
 303     }
 304 
 305   }
 306 };
 307 
 308 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 309   _heap(heap),
 310   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 311   _traversal_set(ShenandoahHeapRegionSet()) {
 312 
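  // Pre-create one task queue per potential worker; workers claim queues by worker id.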
 313   uint num_queues = heap->max_workers();
 314   for (uint i = 0; i < num_queues; ++i) {
 315     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 316     task_queue->initialize();
 317     _task_queues->register_queue(i, task_queue);
 318   }
 319 }
 320 
 321 ShenandoahTraversalGC::~ShenandoahTraversalGC() {
 322 }
 323 
 324 void ShenandoahTraversalGC::prepare_regions() {
 325   size_t num_regions = _heap->num_regions();
 326   ShenandoahMarkingContext* const ctx = _heap->marking_context();
 327   for (size_t i = 0; i < num_regions; i++) {
 328     ShenandoahHeapRegion* region = _heap->get_region(i);
 329     if (_heap->is_bitmap_slice_committed(region)) {
 330       if (_traversal_set.is_in(i)) {
 331         ctx->capture_top_at_mark_start(region);
 332         region->clear_live_data();
 333         assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
 334       } else {
 335         // Everything outside the traversal set is always considered live.
 336         ctx->reset_top_at_mark_start(region);
 337       }
 338     } else {
 339       // The FreeSet may contain uncommitted empty regions. Once they are recommitted,
 340       // their TAMS may hold stale values, so reset them here.
 341       ctx->reset_top_at_mark_start(region);
 342     }
 343   }
 344 }
 345 
 346 void ShenandoahTraversalGC::prepare() {
 347   _heap->collection_set()->clear();
 348   assert(_heap->collection_set()->count() == 0, "collection set not clear");
 349 
 350   {
 351     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
 352     _heap->make_parsable(true);
 353   }
 354 
 355   if (UseTLAB) {
 356     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
 357     _heap->resize_tlabs();
 358   }
 359 
 360   assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
 361   assert(!_heap->marking_context()->is_complete(), "should not be complete");
 362 
 363   ShenandoahFreeSet* free_set = _heap->free_set();
 364   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 365 
 366   // Find collection set
 367   _heap->heuristics()->choose_collection_set(collection_set);
 368   prepare_regions();
 369 
 370   // Rebuild free set
 371   free_set->rebuild();
 372 
 373   log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
 374                      collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
 375 }
 376 
 377 void ShenandoahTraversalGC::init_traversal_collection() {
 378   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");
 379 
 380   if (ShenandoahVerify) {
 381     _heap->verifier()->verify_before_traversal();
 382   }
 383 
 384   if (VerifyBeforeGC) {
 385     Universe::verify();
 386   }
 387 
 388   {
 389     ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
 390     ShenandoahHeapLocker lock(_heap->lock());
 391     prepare();
 392   }
 393 
 394   _heap->set_concurrent_traversal_in_progress(true);
 395 
 396   bool process_refs = _heap->process_references();
 397   if (process_refs) {
 398     ReferenceProcessor* rp = _heap->ref_processor();
 399     rp->enable_discovery(true /*verify_no_refs*/);
 400     rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
 401   }
 402 
 403   {
 404     ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
 405     assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
 406     TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
 407 
 408 #if defined(COMPILER2) || INCLUDE_JVMCI
 409     DerivedPointerTable::clear();
 410 #endif
 411 
 412     {
 413       uint nworkers = _heap->workers()->active_workers();
 414       task_queues()->reserve(nworkers);
 415       ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
 416 
 417       ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator();
 418 
 419       ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots);
 420       _heap->workers()->run_task(&traversal_task);
 421     }
 422 
 423 #if defined(COMPILER2) || INCLUDE_JVMCI
 424     DerivedPointerTable::update_pointers();
 425 #endif
 426   }
 427 
 428   if (ShenandoahPacing) {
 429     _heap->pacer()->setup_for_traversal();
 430   }
 431 }
 432 
 433 void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
 434   ShenandoahObjToScanQueue* q = task_queues()->queue(w);
 435 
 436   // Initialize live data.
 437   jushort* ld = _heap->get_liveness_cache(w);
 438 
 439   ReferenceProcessor* rp = NULL;
 440   if (_heap->process_references()) {
 441     rp = _heap->ref_processor();
 442   }
 443   {
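    // Select the traversal closure variant for this cycle: concurrent vs. degenerated,
    // with or without class unloading (metadata) and string dedup. The marking loop
    // itself is shared via main_loop_work().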
 444     if (!_heap->is_degenerated_gc_in_progress()) {
 445       if (_heap->unload_classes()) {
 446         if (ShenandoahStringDedup::is_enabled()) {
 447           ShenandoahTraversalMetadataDedupClosure cl(q, rp);
 448           main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
 449         } else {
 450           ShenandoahTraversalMetadataClosure cl(q, rp);
 451           main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
 452         }
 453       } else {
 454         if (ShenandoahStringDedup::is_enabled()) {
 455           ShenandoahTraversalDedupClosure cl(q, rp);
 456           main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
 457         } else {
 458           ShenandoahTraversalClosure cl(q, rp);
 459           main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
 460         }
 461       }
 462     } else {
 463       if (_heap->unload_classes()) {
 464         if (ShenandoahStringDedup::is_enabled()) {
 465           ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
 466           main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
 467         } else {
 468           ShenandoahTraversalMetadataDegenClosure cl(q, rp);
 469           main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
 470         }
 471       } else {
 472         if (ShenandoahStringDedup::is_enabled()) {
 473           ShenandoahTraversalDedupDegenClosure cl(q, rp);
 474           main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
 475         } else {
 476           ShenandoahTraversalDegenClosure cl(q, rp);
 477           main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
 478         }
 479       }
 480     }
 481   }
 482 
 483   _heap->flush_liveness_cache(w);
 484 }
 485 
 486 template <class T>
 487 void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
 488   ShenandoahObjToScanQueueSet* queues = task_queues();
 489   ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 490   ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();
 491 
 492   uintx stride = ShenandoahMarkLoopStride;
 493 
 494   ShenandoahMarkTask task;
 495 
 496   // Process outstanding queues, if any.
 497   q = queues->claim_next();
 498   while (q != NULL) {
 499     if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
 500       ShenandoahCancelledTerminatorTerminator tt;
 501       ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 502       ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
 503       while (!terminator->offer_termination(&tt));
 504       return;
 505     }
 506 
 507     for (uint i = 0; i < stride; i++) {
 508       if (q->pop(task)) {
 509         conc_mark->do_task<T>(q, cl, live_data, &task);
 510       } else {
 511         assert(q->is_empty(), "Must be empty");
 512         q = queues->claim_next();
 513         break;
 514       }
 515     }
 516   }
 517 
 518   if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;
 519 
 520   // Normal loop.
 521   q = queues->queue(worker_id);
 522 
 523   ShenandoahTraversalSATBBufferClosure drain_satb(q);
 524   SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
 525 
 526   while (true) {
 527     if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;
 528 
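    // Drain SATB buffers completed by mutator barriers since the last iteration.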
 529     while (satb_mq_set.completed_buffers_num() > 0) {
 530       satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
 531     }
 532 
 533     uint work = 0;
 534     for (uint i = 0; i < stride; i++) {
 535       if (q->pop(task) ||
 536           queues->steal(worker_id, task)) {
 537         conc_mark->do_task<T>(q, cl, live_data, &task);
 538         work++;
 539       } else {
 540         break;
 541       }
 542     }
 543 
 544     if (work == 0) {
 545       // No more work, try to terminate
 546       ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 547       ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
 548       ShenandoahTerminationTimingsTracker term_tracker(worker_id);
 549       if (terminator->offer_termination()) return;
 550     }
 551   }
 552 }
 553 
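// On cancellation, the worker still participates in the termination protocol with a
// cancelled terminator, so that the whole gang can wind down in an orderly fashion.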
 554 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
 555   if (_heap->cancelled_gc()) {
 556     ShenandoahCancelledTerminatorTerminator tt;
 557     ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 558     ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
 559     while (! terminator->offer_termination(&tt));
 560     return true;
 561   }
 562   return false;
 563 }
 564 
 565 void ShenandoahTraversalGC::concurrent_traversal_collection() {
 566   ClassLoaderDataGraph::clear_claimed_marks();
 567 
 568   ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
 569   if (!_heap->cancelled_gc()) {
 570     uint nworkers = _heap->workers()->active_workers();
 571     task_queues()->reserve(nworkers);
 572     ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);
 573 
 574     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 575     ShenandoahConcurrentTraversalCollectionTask task(&terminator);
 576     _heap->workers()->run_task(&task);
 577   }
 578 
 579   if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
 580     preclean_weak_refs();
 581   }
 582 }
 583 
 584 void ShenandoahTraversalGC::final_traversal_collection() {
 585   _heap->make_parsable(true);
 586 
 587   if (!_heap->cancelled_gc()) {
 588 #if defined(COMPILER2) || INCLUDE_JVMCI
 589     DerivedPointerTable::clear();
 590 #endif
 591     ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
 592     uint nworkers = _heap->workers()->active_workers();
 593     task_queues()->reserve(nworkers);
 594 
 595     // Finish traversal
 596     ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
 597     ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);
 598 
 599     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 600     ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
 601     _heap->workers()->run_task(&task);
 602 #if defined(COMPILER2) || INCLUDE_JVMCI
 603     DerivedPointerTable::update_pointers();
 604 #endif
 605   }
 606 
 607   if (!_heap->cancelled_gc() && _heap->process_references()) {
 608     weak_refs_work();
 609   }
 610 
 611   if (!_heap->cancelled_gc() && _heap->unload_classes()) {
 612     _heap->unload_classes_and_cleanup_tables(false);
 613     fixup_roots();
 614   }
 615 
 616   if (!_heap->cancelled_gc()) {
 617     assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
 618     TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
 619     TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
 620 
 621     // No more marking expected
 622     _heap->mark_complete_marking_context();
 623 
 624     // Still good? We can now trash the cset and run the final verification.
 625     {
 626       ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
 627       ShenandoahHeapLocker lock(_heap->lock());
 628 
 629       // Trash everything
 630       // Clear immediate garbage regions.
 631       size_t num_regions = _heap->num_regions();
 632 
 633       ShenandoahHeapRegionSet* traversal_regions = traversal_set();
 634       ShenandoahFreeSet* free_regions = _heap->free_set();
 635       ShenandoahMarkingContext* const ctx = _heap->marking_context();
 636       free_regions->clear();
 637       for (size_t i = 0; i < num_regions; i++) {
 638         ShenandoahHeapRegion* r = _heap->get_region(i);
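        // A region is reclaimable here only if it is in the traversal set, traversal found
        // no live objects in it, and nothing was allocated in it after TAMS was captured
        // (top == TAMS).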
 639         bool not_allocated = ctx->top_at_mark_start(r) == r->top();
 640 
 641         bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
 642         if (r->is_humongous_start() && candidate) {
 643           // Trash humongous.
 644           HeapWord* humongous_obj = r->bottom() + BrooksPointer::word_size();
 645           assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
 646           r->make_trash_immediate();
 647           while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
 648             i++;
 649             r = _heap->get_region(i);
 650             assert(r->is_humongous_continuation(), "must be humongous continuation");
 651             r->make_trash_immediate();
 652           }
 653         } else if (!r->is_empty() && candidate) {
 654           // Trash regular.
 655           assert(!r->is_humongous(), "handled above");
 656           assert(!r->is_trash(), "must not already be trashed");
 657           r->make_trash_immediate();
 658         }
 659       }
 660       _heap->collection_set()->clear();
 661       _heap->free_set()->rebuild();
 662       reset();
 663     }
 664 
 665     assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
 666     _heap->set_concurrent_traversal_in_progress(false);
 667     assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");
 668 
 669     if (ShenandoahVerify) {
 670       _heap->verifier()->verify_after_traversal();
 671     }
 672 
 673     if (VerifyAfterGC) {
 674       Universe::verify();
 675     }
 676   }
 677 }
 678 
 679 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 680 private:
 681   template <class T>
 682   inline void do_oop_work(T* p) {
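    // If the root still points to a from-space copy, update it to the forwardee.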
 683     T o = RawAccess<>::oop_load(p);
 684     if (!CompressedOops::is_null(o)) {
 685       oop obj = CompressedOops::decode_not_null(o);
 686       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 687       if (!oopDesc::equals_raw(obj, forw)) {
 688         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 689       }
 690     }
 691   }
 692 
 693 public:
 694   inline void do_oop(oop* p) { do_oop_work(p); }
 695   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 696 };
 697 
 698 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 699 private:
 700   ShenandoahRootProcessor* _rp;
 701 
 702 public:
 703   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
 704     AbstractGangTask("Shenandoah traversal fix roots"),
 705     _rp(rp) {}
 706 
 707   void work(uint worker_id) {
 708     ShenandoahParallelWorkerSession worker_session(worker_id);
 709     ShenandoahTraversalFixRootsClosure cl;
 710     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 711     CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
 712     _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
 713   }
 714 };
 715 
 716 void ShenandoahTraversalGC::fixup_roots() {
 717 #if defined(COMPILER2) || INCLUDE_JVMCI
 718   DerivedPointerTable::clear();
 719 #endif
 720   ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
 721   ShenandoahTraversalFixRootsTask update_roots_task(&rp);
 722   _heap->workers()->run_task(&update_roots_task);
 723 #if defined(COMPILER2) || INCLUDE_JVMCI
 724   DerivedPointerTable::update_pointers();
 725 #endif
 726 }
 727 
 728 void ShenandoahTraversalGC::reset() {
 729   _task_queues->clear();
 730 }
 731 
 732 ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
 733   return _task_queues;
 734 }
 735 
 736 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 737 private:
 738   ShenandoahHeap* const _heap;
 739 public:
 740   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 741   virtual bool should_return() { return _heap->cancelled_gc(); }
 742 };
 743 
 744 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 745 public:
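  // Invoked by the reference processor when it needs outstanding marking work drained,
  // e.g. after keep_alive has enqueued new objects during precleaning.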
 746   void do_void() {
 747     ShenandoahHeap* sh = ShenandoahHeap::heap();
 748     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 749     assert(sh->process_references(), "why else would we be here?");
 750     ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
 751     shenandoah_assert_rp_isalive_installed();
 752     traversal_gc->main_loop((uint) 0, &terminator, true);
 753   }
 754 };
 755 
 756 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 757 private:
 758   ShenandoahObjToScanQueue* _queue;
 759   Thread* _thread;
 760   ShenandoahTraversalGC* _traversal_gc;
 761   ShenandoahMarkingContext* const _mark_context;
 762 
 763   template <class T>
 764   inline void do_oop_work(T* p) {
 765     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
 766   }
 767 
 768 public:
 769   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 770     _queue(q), _thread(Thread::current()),
 771     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 772     _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 773 
 774   void do_oop(narrowOop* p) { do_oop_work(p); }
 775   void do_oop(oop* p)       { do_oop_work(p); }
 776 };
 777 
 778 class ShenandoahTraversalWeakUpdateClosure : public OopClosure {
 779 private:
 780   template <class T>
 781   inline void do_oop_work(T* p) {
 782     // Cannot call maybe_update_with_forwarded, because on traversal-degen
 783     // path the collection set is already dropped. Instead, do the unguarded store.
 784     // TODO: This can be fixed after degen-traversal stops dropping cset.
 785     T o = RawAccess<>::oop_load(p);
 786     if (!CompressedOops::is_null(o)) {
 787       oop obj = CompressedOops::decode_not_null(o);
 788       obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 789       shenandoah_assert_marked(p, obj);
 790       RawAccess<IS_NOT_NULL>::oop_store(p, obj);
 791     }
 792   }
 793 
 794 public:
 795   ShenandoahTraversalWeakUpdateClosure() {}
 796 
 797   void do_oop(narrowOop* p) { do_oop_work(p); }
 798   void do_oop(oop* p)       { do_oop_work(p); }
 799 };
 800 
 801 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 802 private:
 803   ShenandoahObjToScanQueue* _queue;
 804   Thread* _thread;
 805   ShenandoahTraversalGC* _traversal_gc;
 806   ShenandoahMarkingContext* const _mark_context;
 807 
 808   template <class T>
 809   inline void do_oop_work(T* p) {
 810     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
 811   }
 812 
 813 public:
 814   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 815           _queue(q), _thread(Thread::current()),
 816           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 817           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 818 
 819   void do_oop(narrowOop* p) { do_oop_work(p); }
 820   void do_oop(oop* p)       { do_oop_work(p); }
 821 };
 822 
 823 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
 824 private:
 825   ShenandoahObjToScanQueue* _queue;
 826   Thread* _thread;
 827   ShenandoahTraversalGC* _traversal_gc;
 828   ShenandoahMarkingContext* const _mark_context;
 829 
 830   template <class T>
 831   inline void do_oop_work(T* p) {
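    // The serial reference-processing path is not wrapped in an evac-OOM scope by the
    // caller, so enter one here: process_oop() may evacuate.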
 832     ShenandoahEvacOOMScope evac_scope;
 833     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
 834   }
 835 
 836 public:
 837   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 838           _queue(q), _thread(Thread::current()),
 839           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 840           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 841 
 842   void do_oop(narrowOop* p) { do_oop_work(p); }
 843   void do_oop(oop* p)       { do_oop_work(p); }
 844 };
 845 
 846 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
 847 private:
 848   ShenandoahObjToScanQueue* _queue;
 849   Thread* _thread;
 850   ShenandoahTraversalGC* _traversal_gc;
 851   ShenandoahMarkingContext* const _mark_context;
 852 
 853   template <class T>
 854   inline void do_oop_work(T* p) {
 855     ShenandoahEvacOOMScope evac_scope;
 856     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
 857   }
 858 
 859 public:
 860   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 861           _queue(q), _thread(Thread::current()),
 862           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 863           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 864 
 865   void do_oop(narrowOop* p) { do_oop_work(p); }
 866   void do_oop(oop* p)       { do_oop_work(p); }
 867 };
 868 
 869 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
 870 private:
 871   ReferenceProcessor* _rp;
 872 
 873 public:
 874   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
 875           AbstractGangTask("Precleaning task"),
 876           _rp(rp) {}
 877 
 878   void work(uint worker_id) {
 879     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
 880     ShenandoahParallelWorkerSession worker_session(worker_id);
 881     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 882     ShenandoahEvacOOMScope oom_evac_scope;
 883 
 884     ShenandoahHeap* sh = ShenandoahHeap::heap();
 885 
 886     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
 887 
 888     ShenandoahForwardedIsAliveClosure is_alive;
 889     ShenandoahTraversalCancelledGCYieldClosure yield;
 890     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 891     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
 892     ResourceMark rm;
 893     _rp->preclean_discovered_references(&is_alive, &keep_alive,
 894                                         &complete_gc, &yield,
 895                                         NULL);
 896   }
 897 };
 898 
 899 void ShenandoahTraversalGC::preclean_weak_refs() {
 900   // Pre-cleaning weak references before diving into STW makes sense at the
 901   // end of concurrent mark. This filters out the references whose referents
 902   // are alive. Note that ReferenceProcessor already filters these out during
 903   // reference discovery, where the bulk of that work is done. This phase processes
 904   // the leftovers that missed the initial filtering, i.e. references whose referents
 905   // were marked alive after the reference was discovered by the RP.
 906 
 907   assert(_heap->process_references(), "sanity");
 908   assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");
 909 
 910   // Shortcut if no references were discovered to avoid winding up threads.
 911   ReferenceProcessor* rp = _heap->ref_processor();
 912   if (!rp->has_discovered_references()) {
 913     return;
 914   }
 915 
 916   ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
 917 
 918   shenandoah_assert_rp_isalive_not_installed();
 919   ShenandoahForwardedIsAliveClosure is_alive;
 920   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
 921 
 922   assert(task_queues()->is_empty(), "Should be empty");
 923 
 924   // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
 925   // queues and other goodies. When upstream ReferenceProcessor starts supporting
 926   // parallel precleans, we can extend this to more threads.
 927   ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);
 928 
 929   WorkGang* workers = _heap->workers();
 930   uint nworkers = workers->active_workers();
 931   assert(nworkers == 1, "This code uses only a single worker");
 932   task_queues()->reserve(nworkers);
 933 
 934   ShenandoahTraversalPrecleanTask task(rp);
 935   workers->run_task(&task);
 936 
 937   assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
 938 }
 939 
 940 // Weak Reference Closures
 941 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 942   uint _worker_id;
 943   ShenandoahTaskTerminator* _terminator;
 944   bool _reset_terminator;
 945 
 946 public:
 947   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 948     _worker_id(worker_id),
 949     _terminator(t),
 950     _reset_terminator(reset_terminator) {
 951   }
 952 
 953   void do_void() {
 954     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 955 
 956     ShenandoahHeap* sh = ShenandoahHeap::heap();
 957     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 958     assert(sh->process_references(), "why else would we be here?");
 959     shenandoah_assert_rp_isalive_installed();
 960 
 961     traversal_gc->main_loop(_worker_id, _terminator, false);
 962 
 963     if (_reset_terminator) {
 964       _terminator->reset_for_reuse();
 965     }
 966   }
 967 };
 968 
 969 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
 970   uint _worker_id;
 971   ShenandoahTaskTerminator* _terminator;
 972   bool _reset_terminator;
 973 
 974 public:
 975   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 976           _worker_id(worker_id),
 977           _terminator(t),
 978           _reset_terminator(reset_terminator) {
 979   }
 980 
 981   void do_void() {
 982     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 983 
 984     ShenandoahHeap* sh = ShenandoahHeap::heap();
 985     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 986     assert(sh->process_references(), "why else would we be here?");
 987     shenandoah_assert_rp_isalive_installed();
 988 
 989     ShenandoahEvacOOMScope evac_scope;
 990     traversal_gc->main_loop(_worker_id, _terminator, false);
 991 
 992     if (_reset_terminator) {
 993       _terminator->reset_for_reuse();
 994     }
 995   }
 996 };
 997 
 998 void ShenandoahTraversalGC::weak_refs_work() {
 999   assert(_heap->process_references(), "sanity");
1000 
1001   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
1002 
1003   ShenandoahGCPhase phase(phase_root);
1004 
1005   ReferenceProcessor* rp = _heap->ref_processor();
1006 
1007   // NOTE: We cannot shortcut on has_discovered_references() here, because
1008   // we would then miss marking JNI weak refs; see the implementation in
1009   // ReferenceProcessor::process_discovered_references.
1010   weak_refs_work_doit();
1011 
1012   rp->verify_no_references_recorded();
1013   assert(!rp->discovery_enabled(), "Post condition");
1014 
1015 }
1016 
1017 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
1018 private:
1019   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1020   ShenandoahTaskTerminator* _terminator;
1021 
1022 public:
1023   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1024                                       ShenandoahTaskTerminator* t) :
1025     AbstractGangTask("Process reference objects in parallel"),
1026     _proc_task(proc_task),
1027     _terminator(t) {
1028   }
1029 
1030   void work(uint worker_id) {
1031     ShenandoahEvacOOMScope oom_evac_scope;
1032     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1033     ShenandoahHeap* heap = ShenandoahHeap::heap();
1034     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1035 
1036     ShenandoahForwardedIsAliveClosure is_alive;
1037     if (!heap->is_degenerated_gc_in_progress()) {
1038       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1039       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1040     } else {
1041       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1042       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1043     }
1044   }
1045 };
1046 
1047 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1048 private:
1049   WorkGang* _workers;
1050 
1051 public:
1052   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
1053 
1054   // Executes a task using worker threads.
1055   void execute(ProcessTask& task, uint ergo_workers) {
1056     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1057 
1058     ShenandoahHeap* heap = ShenandoahHeap::heap();
1059     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1060     ShenandoahPushWorkerQueuesScope scope(_workers,
1061                                           traversal_gc->task_queues(),
1062                                           ergo_workers,
1063                                           /* do_check = */ false);
1064     uint nworkers = _workers->active_workers();
1065     traversal_gc->task_queues()->reserve(nworkers);
1066     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1067     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1068     _workers->run_task(&proc_task_proxy);
1069   }
1070 };
1071 
1072 void ShenandoahTraversalGC::weak_refs_work_doit() {
1073   ReferenceProcessor* rp = _heap->ref_processor();
1074 
1075   ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
1076 
1077   shenandoah_assert_rp_isalive_not_installed();
1078   ShenandoahForwardedIsAliveClosure is_alive;
1079   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
1080 
1081   WorkGang* workers = _heap->workers();
1082   uint nworkers = workers->active_workers();
1083 
1084   rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
1085   rp->set_active_mt_degree(nworkers);
1086 
1087   assert(task_queues()->is_empty(), "Should be empty");
1088 
1089   // The complete_gc and keep_alive closures instantiated here are only needed for
1090   // the single-threaded path in the RP. They share queue 0 for tracking work, which
1091   // simplifies the implementation. Since the RP may decide to call complete_gc several
1092   // times, we need to be able to reuse the terminator.
1093   uint serial_worker_id = 0;
1094   ShenandoahTaskTerminator terminator(1, task_queues());
1095   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
1096   ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
1097 
1098   ShenandoahTraversalRefProcTaskExecutor executor(workers);
1099 
1100   ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
1101   if (!_heap->is_degenerated_gc_in_progress()) {
1102     ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
1103     rp->process_discovered_references(&is_alive, &keep_alive,
1104                                       &complete_gc, &executor,
1105                                       &pt);
1106   } else {
1107     ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
1108     rp->process_discovered_references(&is_alive, &keep_alive,
1109                                       &complete_gc, &executor,
1110                                       &pt);
1111   }
1112 
1113   {
1114     ShenandoahGCPhase phase(phase_process);
1115     ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination);
1116 
1117     // Process leftover weak oops (using parallel version)
1118     ShenandoahTraversalWeakUpdateClosure cl;
1119     WeakProcessor::weak_oops_do(workers, &is_alive, &cl, 1);
1120 
1121     pt.print_all_references();
1122 
1123     assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
1124   }
1125 }