1 /*
   2  * Copyright (c) 2018, 2020, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "classfile/classLoaderData.hpp"
  27 #include "classfile/classLoaderDataGraph.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  32 #include "gc/shenandoah/shenandoahClosures.inline.hpp"
  33 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  34 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  36 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  41 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  43 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  44 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
  45 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  46 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
  47 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  48 #include "gc/shenandoah/shenandoahUtils.hpp"
  49 #include "gc/shenandoah/shenandoahVerifier.hpp"
  50 
  51 #include "memory/iterator.hpp"
  52 #include "memory/metaspace.hpp"
  53 #include "memory/resourceArea.hpp"
  54 #include "memory/universe.hpp"
  55 
  56 /**
  57  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  58  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  59  * is incremental-update-based.
  60  *
  61  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  62  * several reasons:
  63  * - We will not reclaim them in this cycle anyway, because they are not in the
  64  *   cset
  65  * - It makes up for the bulk of work during final-pause
  66  * - It also shortens the concurrent cycle because we don't need to
  67  *   pointlessly traverse through newly allocated objects.
  68  * - As a nice side-effect, it solves the I-U termination problem (mutators
  69  *   cannot outrun the GC by allocating like crazy)
  70  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  72  *   achieves the same, but without extra barriers. I think the effect of
  73  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  74  *   particular, we will not see the head of a completely new long linked list
  75  *   in final-pause and end up traversing huge chunks of the heap there.
  76  * - We don't need to see/update the fields of new objects either, because they
  77  *   are either still null, or anything that's been stored into them has been
  78  *   evacuated+enqueued before (and will thus be treated later).
  79  *
  80  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  82  *
  83  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  85  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  86  *   them for cset. This means that we need to protect such regions from
  87  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  88  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  89  *   code.
  90  * - We *need* to traverse through evacuated objects. Those objects are
  91  *   pre-existing, and any references in them point to interesting objects that
  92  *   we need to see. We also want to count them as live, because we just
  93  *   determined that they are alive :-) I achieve this by upping TAMS
  94  *   concurrently for every gclab/gc-shared alloc before publishing the
  95  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  97  */
  98 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  99 private:
 100   ShenandoahObjToScanQueue* _queue;
 101   ShenandoahTraversalGC* _traversal_gc;
 102   ShenandoahHeap* const _heap;
 103 
 104 public:
 105   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 106     _queue(q),
 107     _heap(ShenandoahHeap::heap())
 108  { }
 109 
 110   void do_buffer(void** buffer, size_t size) {
 111     for (size_t i = 0; i < size; ++i) {
 112       oop* p = (oop*) &buffer[i];
 113       oop obj = RawAccess<>::oop_load(p);
 114       shenandoah_assert_not_forwarded(p, obj);
 115       if (_heap->marking_context()->mark(obj)) {
 116         _queue->push(ShenandoahMarkTask(obj));
 117       }
 118     }
 119   }
 120 };
 121 
 122 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 123 private:
 124   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 125 
 126 public:
 127   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 128     _satb_cl(satb_cl) {}
 129 
 130   void do_thread(Thread* thread) {
 131     ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
 132   }
 133 };
 134 
 135 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 136 // and remark them later during final-traversal.
 137 class ShenandoahMarkCLDClosure : public CLDClosure {
 138 private:
 139   OopClosure* _cl;
 140 public:
 141   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 142   void do_cld(ClassLoaderData* cld) {
 143     cld->oops_do(_cl, ClassLoaderData::_claim_strong, true);
 144   }
 145 };
 146 
 147 // Like CLDToOopClosure, but only process modified CLDs
 148 class ShenandoahRemarkCLDClosure : public CLDClosure {
 149 private:
 150   OopClosure* _cl;
 151 public:
 152   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 153   void do_cld(ClassLoaderData* cld) {
 154     if (cld->has_modified_oops()) {
 155       cld->oops_do(_cl, ClassLoaderData::_claim_strong, true);
 156     }
 157   }
 158 };
 159 
// STW gang task for the init-traversal pause: scans GC roots through the given
// ShenandoahCSetRootScanner, feeding discovered references into
// ShenandoahTraversalRootsClosure on per-worker queues.
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahCSetRootScanner* _rp;
  ShenandoahHeap* _heap;

public:
  ShenandoahInitTraversalCollectionTask(ShenandoahCSetRootScanner* rp) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    // Reference processor is only passed to the roots closure when reference
    // processing is enabled; otherwise it stays NULL.
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalRootsClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      if (unload_classes) {
        // NOTE(review): with class unloading the CLD closure is omitted here —
        // presumably CLDs are covered by the unloading path; confirm upstream.
        _rp->roots_do(worker_id, &roots_cl, NULL, &code_cl);
      } else {
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, &code_cl);
      }
    }
  }
};
 197 
 198 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 199 private:
 200   ShenandoahTaskTerminator* _terminator;
 201   ShenandoahHeap* _heap;
 202 public:
 203   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
 204     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 205     _terminator(terminator),
 206     _heap(ShenandoahHeap::heap()) {}
 207 
 208   void work(uint worker_id) {
 209     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 210     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 211     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 212 
 213     // Drain all outstanding work in queues.
 214     traversal_gc->main_loop(worker_id, _terminator, true);
 215   }
 216 };
 217 
// STW gang task for the final-traversal pause: drains remaining completed SATB
// buffers, rescans GC roots (piggy-backing the thread-local SATB drain on the
// thread root scan), and then finishes all outstanding queued marking work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahAllRootScanner* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. We only need to rescan
    // on stack code roots, in case of class unloading is enabled. Otherwise, code roots are
    // scanned during init traversal or degenerated GC will update them at the end.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalRootsClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Only re-scan CLDs that were modified since init traversal.
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, &code_cl, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    } else {
      // Degenerated GC path: same structure, but with the degen roots closure
      // and no code blob closure.
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    }

    {
      ShenandoahWorkerTimingsTracker timer(ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 3: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
 292 
 293 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 294   _heap(heap),
 295   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 296   _traversal_set(ShenandoahHeapRegionSet()) {
 297 
 298   // Traversal does not support concurrent code root scanning
 299   FLAG_SET_DEFAULT(ShenandoahConcurrentScanCodeRoots, false);
 300 
 301   uint num_queues = heap->max_workers();
 302   for (uint i = 0; i < num_queues; ++i) {
 303     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 304     task_queue->initialize();
 305     _task_queues->register_queue(i, task_queue);
 306   }
 307 }
 308 
// Empty destructor: no explicit cleanup; the task queue set allocated in the
// constructor is not freed here.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
 311 
// Sets up per-region marking state for the coming cycle. Regions in the
// traversal set get TAMS captured at current top (so objects allocated beyond
// it are implicitly marked, see file header); all other regions get TAMS
// reset.
void ShenandoahTraversalGC::prepare_regions() {
  size_t num_regions = _heap->num_regions();
  ShenandoahMarkingContext* const ctx = _heap->marking_context();
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = _heap->get_region(i);
    // Record current top as the update watermark for this region.
    region->set_update_watermark(region->top());
    if (_heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        ctx->reset_top_at_mark_start(region);
      }
    } else {
      // FreeSet may contain uncommitted empty regions, once they are recommitted,
      // their TAMS may have old values, so reset them here.
      ctx->reset_top_at_mark_start(region);
    }
  }
}
 334 
// Prepares the heap for a traversal cycle: retires/refreshes TLABs, syncs
// pinned region status, then (under the heap lock) chooses the collection set,
// sets up per-region TAMS via prepare_regions(), and rebuilds the free set.
void ShenandoahTraversalGC::prepare() {
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  // About to choose the collection set, make sure we know which regions are pinned.
  {
    ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_prepare_sync_pinned);
    _heap->sync_pinned_region_status();
  }

  ShenandoahCollectionSet* collection_set = _heap->collection_set();
  {
    ShenandoahHeapLocker lock(_heap->lock());

    collection_set->clear();
    assert(collection_set->count() == 0, "collection set not clear");

    // Find collection set
    _heap->heuristics()->choose_collection_set(collection_set);
    prepare_regions();

    // Rebuild free set
    _heap->free_set()->rebuild();
  }

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s, " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions",
                     byte_size_in_proper_unit(collection_set->garbage()),   proper_unit_for_byte_size(collection_set->garbage()),
                     byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()),
                     collection_set->count());
}
 375 
// Init-traversal pause (runs at a Shenandoah safepoint): verifies (optional),
// prepares the cycle, enables reference discovery, and runs the parallel
// init root scan.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    prepare();
  }

  // From here on, traversal is in progress and the heap may contain
  // forwarded objects.
  _heap->set_concurrent_traversal_in_progress(true);
  _heap->set_has_forwarded_objects(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahCSetRootScanner rp(nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
      ShenandoahInitTraversalCollectionTask traversal_task(&rp);
      _heap->workers()->run_task(&traversal_task);
    }

#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}
 428 
// Per-worker entry into the marking loop. The degenerated / class-unloading /
// string-dedup flags are constant for the whole loop, so the matching
// statically-typed closure is chosen once here and main_loop_work is
// specialized on it (avoiding per-object virtual dispatch).
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  // Publish this worker's accumulated liveness data back to the heap.
  _heap->flush_liveness_cache(w);
}
 481 
// Core marking loop for one worker. Two phases:
//   1. Drain queues left unclaimed from earlier work (claim_next loop).
//   2. Normal loop: drain completed SATB buffers, then pop from own queue or
//      steal from others; when no work is found, offer termination.
// Returns early whenever GC cancellation is observed.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Max number of tasks to process between cancellation/SATB re-checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        // Claimed queue is exhausted; move on to the next unclaimed one.
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Drain whatever SATB buffers mutators have completed so far.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}
 545 
 546 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
 547   if (_heap->cancelled_gc()) {
 548     return true;
 549   }
 550   return false;
 551 }
 552 
// Concurrent traversal phase: runs worker tasks that drain marking queues
// until global termination, then (optionally) precleans weak references.
// Each step is skipped if GC has been cancelled.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}
 568 
// Final-traversal pause: finishes all marking work, processes weak
// references, performs cleanup/root fixup, and trashes traversal regions
// that turned out fully dead. Each stage is guarded on GC not being
// cancelled; ordering of the stages is significant.
void ShenandoahTraversalGC::final_traversal_collection() {
  if (!_heap->cancelled_gc()) {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahAllRootScanner rp(nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->set_concurrent_traversal_in_progress(false);
    _heap->mark_complete_marking_context();

    // A rare case, TLAB/GCLAB is initialized from an empty region without
    // any live data, the region can be trashed and may be uncommitted in later code,
    // that results the TLAB/GCLAB not usable. Retire them here.
    _heap->make_parsable(true);

    _heap->parallel_cleaning(false);
    fixup_roots();

    _heap->set_has_forwarded_objects(false);

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Need to see that pinned region status is updated: newly pinned regions must not
    // be trashed. New unpinned regions should be trashed.
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_sync_pinned);
      _heap->sync_pinned_region_status();
    }

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // TAMS == top means no allocation happened in this region since TAMS
        // was captured; see file header for why alloc-regions are protected.
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Also trash all continuation regions of this humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }
#ifdef ASSERT
    else {
      verify_roots_after_gc();
    }
#endif

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}
 679 
 680 class ShenandoahVerifyAfterGC : public OopClosure {
 681 private:
 682   template <class T>
 683   void do_oop_work(T* p) {
 684     T o = RawAccess<>::oop_load(p);
 685     if (!CompressedOops::is_null(o)) {
 686       oop obj = CompressedOops::decode_not_null(o);
 687       shenandoah_assert_correct(p, obj);
 688       shenandoah_assert_not_in_cset_except(p, obj, ShenandoahHeap::heap()->cancelled_gc());
 689       shenandoah_assert_not_forwarded(p, obj);
 690     }
 691   }
 692 
 693 public:
 694   void do_oop(narrowOop* p) { do_oop_work(p); }
 695   void do_oop(oop* p)       { do_oop_work(p); }
 696 };
 697 
 698 void ShenandoahTraversalGC::verify_roots_after_gc() {
 699   ShenandoahRootVerifier verifier;
 700   ShenandoahVerifyAfterGC cl;
 701   verifier.oops_do(&cl);
 702 }
 703 
 704 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 705 private:
 706   template <class T>
 707   inline void do_oop_work(T* p) {
 708     T o = RawAccess<>::oop_load(p);
 709     if (!CompressedOops::is_null(o)) {
 710       oop obj = CompressedOops::decode_not_null(o);
 711       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 712       if (obj != forw) {
 713         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 714       }
 715     }
 716   }
 717 
 718 public:
 719   inline void do_oop(oop* p) { do_oop_work(p); }
 720   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 721 };
 722 
 723 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 724 private:
 725   ShenandoahRootUpdater* _rp;
 726 
 727 public:
 728   ShenandoahTraversalFixRootsTask(ShenandoahRootUpdater* rp) :
 729     AbstractGangTask("Shenandoah traversal fix roots"),
 730     _rp(rp) {
 731     assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
 732   }
 733 
 734   void work(uint worker_id) {
 735     ShenandoahParallelWorkerSession worker_session(worker_id);
 736     ShenandoahTraversalFixRootsClosure cl;
 737     ShenandoahForwardedIsAliveClosure is_alive;
 738     _rp->roots_do(worker_id, &is_alive, &cl);
 739   }
 740 };
 741 
// Walks all roots in parallel and rewrites any references that still point to
// forwarded objects, so that roots are clean before forwarding information is
// no longer needed.
void ShenandoahTraversalGC::fixup_roots() {
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootUpdater rp(_heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}
 753 
// Discards any leftover tasks so the queues start clean for the next cycle.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
 757 
// Accessor for the traversal task queue set (one queue per max worker,
// created in the constructor).
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 761 
 762 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 763 private:
 764   ShenandoahHeap* const _heap;
 765 public:
 766   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 767   virtual bool should_return() { return _heap->cancelled_gc(); }
 768 };
 769 
// VoidClosure used as the "complete GC" step during weak-reference
// precleaning: runs the traversal main loop single-threaded on worker 0.
class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    // Single-threaded terminator: precleaning runs on one worker only.
    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
    shenandoah_assert_rp_isalive_installed();
    traversal_gc->main_loop((uint) 0, &terminator, true);
  }
};
 781 
// Keep-alive closure for reference processing during a (non-degenerated)
// traversal cycle: routes each reference slot through process_oop with
// atomic-update semantics and no string dedup.
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};
 803 
 804 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 805 private:
 806   ShenandoahObjToScanQueue* _queue;
 807   Thread* _thread;
 808   ShenandoahTraversalGC* _traversal_gc;
 809   ShenandoahMarkingContext* const _mark_context;
 810 
 811   template <class T>
 812   inline void do_oop_work(T* p) {
 813     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
 814   }
 815 
 816 public:
 817   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 818           _queue(q), _thread(Thread::current()),
 819           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 820           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 821 
 822   void do_oop(narrowOop* p) { do_oop_work(p); }
 823   void do_oop(oop* p)       { do_oop_work(p); }
 824 };
 825 
 826 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
 827 private:
 828   ShenandoahObjToScanQueue* _queue;
 829   Thread* _thread;
 830   ShenandoahTraversalGC* _traversal_gc;
 831   ShenandoahMarkingContext* const _mark_context;
 832 
 833   template <class T>
 834   inline void do_oop_work(T* p) {
 835     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
 836   }
 837 
 838 public:
 839   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 840           _queue(q), _thread(Thread::current()),
 841           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 842           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 843 
 844   void do_oop(narrowOop* p) { do_oop_work(p); }
 845   void do_oop(oop* p)       { do_oop_work(p); }
 846 };
 847 
 848 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
 849 private:
 850   ShenandoahObjToScanQueue* _queue;
 851   Thread* _thread;
 852   ShenandoahTraversalGC* _traversal_gc;
 853   ShenandoahMarkingContext* const _mark_context;
 854 
 855   template <class T>
 856   inline void do_oop_work(T* p) {
 857     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
 858   }
 859 
 860 public:
 861   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 862           _queue(q), _thread(Thread::current()),
 863           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 864           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 865 
 866   void do_oop(narrowOop* p) { do_oop_work(p); }
 867   void do_oop(oop* p)       { do_oop_work(p); }
 868 };
 869 
 870 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
 871 private:
 872   ReferenceProcessor* _rp;
 873 
 874 public:
 875   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
 876           AbstractGangTask("Precleaning task"),
 877           _rp(rp) {}
 878 
 879   void work(uint worker_id) {
 880     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
 881     ShenandoahParallelWorkerSession worker_session(worker_id);
 882     ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
 883 
 884     ShenandoahHeap* sh = ShenandoahHeap::heap();
 885 
 886     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
 887 
 888     ShenandoahForwardedIsAliveClosure is_alive;
 889     ShenandoahTraversalCancelledGCYieldClosure yield;
 890     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 891     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
 892     ResourceMark rm;
 893     _rp->preclean_discovered_references(&is_alive, &keep_alive,
 894                                         &complete_gc, &yield,
 895                                         NULL);
 896   }
 897 };
 898 
 899 void ShenandoahTraversalGC::preclean_weak_refs() {
 900   // Pre-cleaning weak references before diving into STW makes sense at the
 901   // end of concurrent mark. This will filter out the references which referents
 902   // are alive. Note that ReferenceProcessor already filters out these on reference
 903   // discovery, and the bulk of work is done here. This phase processes leftovers
 904   // that missed the initial filtering, i.e. when referent was marked alive after
 905   // reference was discovered by RP.
 906 
 907   assert(_heap->process_references(), "sanity");
 908   assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");
 909 
 910   // Shortcut if no references were discovered to avoid winding up threads.
 911   ReferenceProcessor* rp = _heap->ref_processor();
 912   if (!rp->has_discovered_references()) {
 913     return;
 914   }
 915 
 916   ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
 917 
 918   shenandoah_assert_rp_isalive_not_installed();
 919   ShenandoahForwardedIsAliveClosure is_alive;
 920   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
 921 
 922   assert(task_queues()->is_empty(), "Should be empty");
 923 
 924   // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
 925   // queues and other goodies. When upstream ReferenceProcessor starts supporting
 926   // parallel precleans, we can extend this to more threads.
 927   ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);
 928 
 929   WorkGang* workers = _heap->workers();
 930   uint nworkers = workers->active_workers();
 931   assert(nworkers == 1, "This code uses only a single worker");
 932   task_queues()->reserve(nworkers);
 933 
 934   ShenandoahTraversalPrecleanTask task(rp);
 935   workers->run_task(&task);
 936 
 937   assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
 938 }
 939 
 940 // Weak Reference Closures
 941 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 942   uint _worker_id;
 943   ShenandoahTaskTerminator* _terminator;
 944   bool _reset_terminator;
 945 
 946 public:
 947   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 948     _worker_id(worker_id),
 949     _terminator(t),
 950     _reset_terminator(reset_terminator) {
 951   }
 952 
 953   void do_void() {
 954     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 955 
 956     ShenandoahHeap* sh = ShenandoahHeap::heap();
 957     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 958     assert(sh->process_references(), "why else would we be here?");
 959     shenandoah_assert_rp_isalive_installed();
 960 
 961     traversal_gc->main_loop(_worker_id, _terminator, false);
 962 
 963     if (_reset_terminator) {
 964       _terminator->reset_for_reuse();
 965     }
 966   }
 967 };
 968 
 969 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
 970   uint _worker_id;
 971   ShenandoahTaskTerminator* _terminator;
 972   bool _reset_terminator;
 973 
 974 public:
 975   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
 976           _worker_id(worker_id),
 977           _terminator(t),
 978           _reset_terminator(reset_terminator) {
 979   }
 980 
 981   void do_void() {
 982     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 983 
 984     ShenandoahHeap* sh = ShenandoahHeap::heap();
 985     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 986     assert(sh->process_references(), "why else would we be here?");
 987     shenandoah_assert_rp_isalive_installed();
 988 
 989     traversal_gc->main_loop(_worker_id, _terminator, false);
 990 
 991     if (_reset_terminator) {
 992       _terminator->reset_for_reuse();
 993     }
 994   }
 995 };
 996 
 997 void ShenandoahTraversalGC::weak_refs_work() {
 998   assert(_heap->process_references(), "sanity");
 999 
1000   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
1001 
1002   ShenandoahGCPhase phase(phase_root);
1003 
1004   ReferenceProcessor* rp = _heap->ref_processor();
1005 
1006   // NOTE: We cannot shortcut on has_discovered_references() here, because
1007   // we will miss marking JNI Weak refs then, see implementation in
1008   // ReferenceProcessor::process_discovered_references.
1009   weak_refs_work_doit();
1010 
1011   rp->verify_no_references_recorded();
1012   assert(!rp->discovery_enabled(), "Post condition");
1013 
1014 }
1015 
1016 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
1017 private:
1018   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1019   ShenandoahTaskTerminator* _terminator;
1020 
1021 public:
1022   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1023                                       ShenandoahTaskTerminator* t) :
1024     AbstractGangTask("Process reference objects in parallel"),
1025     _proc_task(proc_task),
1026     _terminator(t) {
1027   }
1028 
1029   void work(uint worker_id) {
1030     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1031     ShenandoahHeap* heap = ShenandoahHeap::heap();
1032     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1033 
1034     ShenandoahForwardedIsAliveClosure is_alive;
1035     if (!heap->is_degenerated_gc_in_progress()) {
1036       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1037       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1038     } else {
1039       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1040       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1041     }
1042   }
1043 };
1044 
1045 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1046 private:
1047   WorkGang* _workers;
1048 
1049 public:
1050   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
1051 
1052   // Executes a task using worker threads.
1053   void execute(ProcessTask& task, uint ergo_workers) {
1054     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1055 
1056     ShenandoahHeap* heap = ShenandoahHeap::heap();
1057     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1058     ShenandoahPushWorkerQueuesScope scope(_workers,
1059                                           traversal_gc->task_queues(),
1060                                           ergo_workers,
1061                                           /* do_check = */ false);
1062     uint nworkers = _workers->active_workers();
1063     traversal_gc->task_queues()->reserve(nworkers);
1064     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1065     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1066     _workers->run_task(&proc_task_proxy);
1067   }
1068 };
1069 
1070 void ShenandoahTraversalGC::weak_refs_work_doit() {
1071   ReferenceProcessor* rp = _heap->ref_processor();
1072 
1073   ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
1074 
1075   shenandoah_assert_rp_isalive_not_installed();
1076   ShenandoahForwardedIsAliveClosure is_alive;
1077   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
1078 
1079   WorkGang* workers = _heap->workers();
1080   uint nworkers = workers->active_workers();
1081 
1082   rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
1083   rp->set_active_mt_degree(nworkers);
1084 
1085   assert(task_queues()->is_empty(), "Should be empty");
1086 
1087   // complete_gc and keep_alive closures instantiated here are only needed for
1088   // single-threaded path in RP. They share the queue 0 for tracking work, which
1089   // simplifies implementation. Since RP may decide to call complete_gc several
1090   // times, we need to be able to reuse the terminator.
1091   uint serial_worker_id = 0;
1092   ShenandoahTaskTerminator terminator(1, task_queues());
1093   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
1094   ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
1095 
1096   ShenandoahTraversalRefProcTaskExecutor executor(workers);
1097 
1098   ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
1099   if (!_heap->is_degenerated_gc_in_progress()) {
1100     ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
1101     rp->process_discovered_references(&is_alive, &keep_alive,
1102                                       &complete_gc, &executor,
1103                                       &pt);
1104   } else {
1105     ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
1106     rp->process_discovered_references(&is_alive, &keep_alive,
1107                                       &complete_gc, &executor,
1108                                       &pt);
1109   }
1110 
1111   pt.print_all_references();
1112   assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
1113 }