1 /*
   2  * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "memory/referenceProcessor.hpp"
  27 #include "utilities/workgroup.hpp"
  28 #include "gc_implementation/shenandoah/shenandoahBarrierSet.hpp"
  29 #include "gc_implementation/shenandoah/shenandoahClosures.inline.hpp"
  30 #include "gc_implementation/shenandoah/shenandoahCodeRoots.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahCollectionSet.hpp"
  32 #include "gc_implementation/shenandoah/shenandoahCollectorPolicy.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahFreeSet.hpp"
  34 #include "gc_implementation/shenandoah/shenandoahPhaseTimings.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  36 #include "gc_implementation/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  37 #include "gc_implementation/shenandoah/shenandoahHeuristics.hpp"
  38 #include "gc_implementation/shenandoah/shenandoahMarkingContext.inline.hpp"
  39 #include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
  40 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
  41 #include "gc_implementation/shenandoah/shenandoahStringDedup.hpp"
  42 #include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp"
  43 #include "gc_implementation/shenandoah/shenandoahTimingTracker.hpp"
  44 #include "gc_implementation/shenandoah/shenandoahTraversalGC.hpp"
  45 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
  46 #include "gc_implementation/shenandoah/shenandoahVerifier.hpp"
  47 #include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
  48 
  49 #include "memory/iterator.hpp"
  50 #include "memory/metaspace.hpp"
  51 #include "memory/resourceArea.hpp"
  52 
  53 /**
 * NOTE: Although we use the SATB buffers from thread.hpp and satbMarkQueue.hpp, this is not an SATB algorithm.
 * We use the buffers as generic oop buffers to enqueue the new values of concurrent oop stores; in other words,
 * the algorithm is incremental-update-based.
  57  *
  58  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  59  * several reasons:
  60  * - We will not reclaim them in this cycle anyway, because they are not in the
  61  *   cset
 * - Traversing them would make up the bulk of the work during the final pause
  63  * - It also shortens the concurrent cycle because we don't need to
  64  *   pointlessly traverse through newly allocated objects.
  65  * - As a nice side-effect, it solves the I-U termination problem (mutators
  66  *   cannot outrun the GC by allocating like crazy)
 * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects as implicitly
 *   live achieves the same, but without extra barriers. I think the effect of
 *   a shortened final pause (mentioned above) is the main advantage of MWF. In
 *   particular, we will not see the head of a completely new long linked list
 *   in the final pause and end up traversing huge chunks of the heap there.
  73  * - We don't need to see/update the fields of new objects either, because they
  74  *   are either still null, or anything that's been stored into them has been
  75  *   evacuated+enqueued before (and will thus be treated later).
  76  *
 * We achieve this by setting TAMS for each region; everything allocated
 * beyond TAMS is 'implicitly marked'.
  79  *
  80  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
 *   them as live. Otherwise the next cycle wouldn't pick them up and consider
 *   them for the cset. This means that we need to protect such regions from
 *   getting accidentally trashed at the end of the traversal cycle. This is
 *   why I keep track of alloc-regions and check is_alloc_region() in the
 *   trashing code.
 * - We *need* to traverse through evacuated objects. Those objects are
 *   pre-existing, and any references in them point to interesting objects that
 *   we need to see. We also want to count them as live, because we just
 *   determined that they are alive :-) I achieve this by bumping TAMS up
 *   concurrently for every gclab/gc-shared alloc before publishing the
 *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and will traverse through them as normal.
  94  */
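/*
 * To make the TAMS rule above concrete, here is an illustrative sketch (the helper
 * below is hypothetical, not code used by this file): an object is implicitly marked
 * when it sits at or above its region's TAMS, so the traversal neither marks nor
 * scans it.
 *
 *   bool is_implicitly_marked(ShenandoahHeapRegion* r, oop obj) {
 *     HeapWord* tams = ShenandoahHeap::heap()->marking_context()->top_at_mark_start(r);
 *     return (HeapWord*) obj >= tams;  // allocated after the cycle started
 *   }
 *
 * Evacuation bumps TAMS up past the newly allocated copy before publishing it, so
 * evacuated objects end up below TAMS and are traversed and live-counted as usual.
 */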
  95 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  96 private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* const _heap;

public:
  ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap())
  { }
 106 
 107   void do_buffer(void** buffer, size_t size) {
 108     for (size_t i = 0; i < size; ++i) {
 109       oop* p = (oop*) &buffer[i];
 110       oop obj = oopDesc::load_heap_oop(p);
 111       shenandoah_assert_not_forwarded(p, obj);
 112       if (_heap->marking_context()->mark(obj)) {
 113         _queue->push(ShenandoahMarkTask(obj));
 114       }
 115     }
 116   }
 117 };
 118 
 119 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 120 private:
 121   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 122   int _thread_parity;
 123 
 124 public:
 125   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 126     _satb_cl(satb_cl),
 127     _thread_parity(SharedHeap::heap()->strong_roots_parity())  {}
 128 
 129   void do_thread(Thread* thread) {
 130     if (thread->is_Java_thread()) {
 131       if (thread->claim_oops_do(true, _thread_parity)) {
 132         JavaThread* jt = (JavaThread*)thread;
 133         jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
 134       }
 135     } else if (thread->is_VM_thread()) {
 136       if (thread->claim_oops_do(true, _thread_parity)) {
 137         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 138       }
 139     }
 140   }
 141 };
 142 
 143 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 144 // and remark them later during final-traversal.
 145 class ShenandoahMarkCLDClosure : public CLDClosure {
 146 private:
 147   OopClosure* _cl;
 148 public:
 149   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 150   void do_cld(ClassLoaderData* cld) {
 151     KlassToOopClosure klasscl(_cl);
 152     cld->oops_do(_cl, &klasscl, true);
 153   }
 154 };
 155 
 156 // Like CLDToOopClosure, but only process modified CLDs
 157 class ShenandoahRemarkCLDClosure : public CLDClosure {
 158 private:
 159   OopClosure* _cl;
 160 public:
 161   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    if (cld->has_modified_oops()) {
      KlassToOopClosure klasscl(_cl);
      cld->oops_do(_cl, &klasscl, true);
    }
  }
 166 };
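// Taken together (per the comments above): the init pass walks all CLDs with
// ShenandoahMarkCLDClosure, clearing their modified-oops state, while the final pass
// uses ShenandoahRemarkCLDClosure to revisit only the CLDs that mutators have modified
// in the meantime.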
 167 
 168 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
 169 private:
 170   ShenandoahRootProcessor* _rp;
 171   ShenandoahHeap* _heap;
 172   ShenandoahCsetCodeRootsIterator _cset_coderoots;
 173 public:
 174   ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp) :
 175     AbstractGangTask("Shenandoah Init Traversal Collection"),
 176     _rp(rp),
 177     _heap(ShenandoahHeap::heap()),
 178     _cset_coderoots(ShenandoahCodeRoots::cset_iterator()) {}
 179 
 180   void work(uint worker_id) {
 181     ShenandoahParallelWorkerSession worker_session(worker_id);
 182 
 183     ShenandoahEvacOOMScope oom_evac_scope;
 184     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
 185     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 186 
 187     bool process_refs = _heap->process_references();
 188     bool unload_classes = _heap->unload_classes();
 189     ReferenceProcessor* rp = NULL;
 190     if (process_refs) {
 191       rp = _heap->ref_processor();
 192     }
 193 
 194     // Step 1: Process ordinary GC roots.
 195     {
 196       ShenandoahTraversalRootsClosure roots_cl(q, rp);
 197       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
 198       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 199       if (unload_classes) {
 200         _rp->process_strong_roots(&roots_cl, NULL, &cld_cl, NULL, NULL, NULL, worker_id);
        // Need to pre-evacuate code roots here. Otherwise we might see from-space constants.
 202         ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
 203         ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
 204         _cset_coderoots.possibly_parallel_blobs_do(&code_cl);
 205       } else {
 206         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
 207       }
 208     }
 209   }
 210 };
 211 
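// Root closure used by ShenandoahWeakInitTraversalCollectionTask below: if the referenced
// object is in the collection set, evacuate it (unless a forwarded copy already exists)
// and update the root slot to point at the forwardee.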
 212 class ShenandoahTraversalWeakRootsClosure : public OopClosure {
 213 private:
 214   ShenandoahHeap* const _heap;
 215 
 216   template <class T>
 217   void do_oop_work(T* p) {
 218     T o = oopDesc::load_heap_oop(p);
 219     if (!oopDesc::is_null(o)) {
 220       oop obj = oopDesc::decode_heap_oop_not_null(o);
 221       if (_heap->in_collection_set(obj)) {
 222         oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 223         if (obj == forw) {
 224           forw = _heap->evacuate_object(obj, Thread::current());
 225         }
 226         shenandoah_assert_forwarded_except(p, obj, _heap->cancelled_gc());
 227         // Update reference.
 228         oopDesc::encode_store_heap_oop_not_null(p, forw);
 229       }
 230     }
 231   }
 232 public:
 233   ShenandoahTraversalWeakRootsClosure() :
 234     _heap(ShenandoahHeap::heap()) {}
 235 
 236   virtual void do_oop(oop* p)       { do_oop_work(p); }
 237   virtual void do_oop(narrowOop* p) { do_oop_work(p); }
 238 };
 239 
 240 class ShenandoahWeakInitTraversalCollectionTask : public AbstractGangTask {
 241 private:
 242   ShenandoahRootProcessor* _rp;
 243   ShenandoahHeap* _heap;
 244 public:
 245   ShenandoahWeakInitTraversalCollectionTask(ShenandoahRootProcessor* rp) :
 246     AbstractGangTask("Shenandoah Weak Init Traversal Collection"),
 247     _rp(rp),
 248     _heap(ShenandoahHeap::heap()) {}
 249 
 250   void work(uint worker_id) {
 251     ShenandoahParallelWorkerSession worker_session(worker_id);
 252 
 253     ShenandoahEvacOOMScope oom_evac_scope;
 254     ShenandoahTraversalWeakRootsClosure roots_cl;
 255     CLDToOopClosure cld_cl(&roots_cl);
 256     MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 257     _rp->process_all_roots(&roots_cl, &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
 258   }
 259 };
 260 
 261 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 262 private:
 263   ShenandoahTaskTerminator* _terminator;
 264   ShenandoahHeap* _heap;
 265 public:
 266   ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
 267     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 268     _terminator(terminator),
 269     _heap(ShenandoahHeap::heap()) {}
 270 
 271   void work(uint worker_id) {
 272     ShenandoahConcurrentWorkerSession worker_session(worker_id);
 273     ShenandoahEvacOOMScope oom_evac_scope;
 274     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 275 
 276     // Drain all outstanding work in queues.
 277     traversal_gc->main_loop(worker_id, _terminator);
 278   }
 279 };
 280 
 281 class ShenandoahPreFinalTraversalCollectionTask : public AbstractGangTask {
 282 private:
 283   ShenandoahHeap* const _heap;
 284 public:
 285   ShenandoahPreFinalTraversalCollectionTask() :
 286     AbstractGangTask("Shenandoah Pre Final Traversal Collection"),
 287     _heap(ShenandoahHeap::heap()) {}
 288 
 289   void work(uint worker_id) {
 290     ShenandoahParallelWorkerSession worker_session(worker_id);
 291     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 292     ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
 293     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 294 
 295     // Step 0: Drain outstanding SATB queues.
 296     ShenandoahTraversalSATBBufferClosure satb_cl(q);
 297     // Process remaining finished SATB buffers.
 298     SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 299     while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
 300 
 301     ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
 302     Threads::threads_do(&tc);
 303   }
 304 };
 305 
 306 class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
 307 private:
 308   ShenandoahRootProcessor* _rp;
 309   ShenandoahTaskTerminator* _terminator;
 310   ShenandoahHeap* _heap;
 311 public:
 312   ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
 313     AbstractGangTask("Shenandoah Final Traversal Collection"),
 314     _rp(rp),
 315     _terminator(terminator),
 316     _heap(ShenandoahHeap::heap()) {}
 317 
 318   void work(uint worker_id) {
 319     ShenandoahParallelWorkerSession worker_session(worker_id);
 320 
 321     ShenandoahEvacOOMScope oom_evac_scope;
 322     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 323 
 324     ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
 325     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 326 
 327     bool process_refs = _heap->process_references();
 328     bool unload_classes = _heap->unload_classes();
 329     ReferenceProcessor* rp = NULL;
 330     if (process_refs) {
 331       rp = _heap->ref_processor();
 332     }
 333 
    // Step 1: Process GC roots.
    // Oops in code roots are marked, evacuated, enqueued for further traversal, and the
    // references to them are updated during the init pause. New nmethods are handled in a
    // similar way during nmethod registration. Therefore, we don't need to rescan code
    // roots here.
 339     if (!_heap->is_degenerated_gc_in_progress()) {
 340       ShenandoahTraversalRootsClosure roots_cl(q, rp);
 341       CLDToOopClosure cld_cl(&roots_cl);
 342       if (unload_classes) {
 343         ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
 344         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, NULL, worker_id);
 345       } else {
 346         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, NULL, worker_id);
 347       }
 348     } else {
 349       ShenandoahTraversalDegenClosure roots_cl(q, rp);
 350       CLDToOopClosure cld_cl(&roots_cl);
 351       if (unload_classes) {
 352         ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
 353         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, NULL, worker_id);
 354       } else {
 355         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, NULL, worker_id);
 356       }
 357     }
 358 
 359     {
 360       ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
 361       ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);
 362 
      // Step 2: Finally, drain all outstanding work in queues.
 364       traversal_gc->main_loop(worker_id, _terminator);
 365     }
 366 
 367   }
 368 };
 369 
 370 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 371   _heap(heap),
 372   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 373   _traversal_set(ShenandoahHeapRegionSet()) {
 374 
 375   // Traversal does not support concurrent code root scanning
 376   FLAG_SET_DEFAULT(ShenandoahConcurrentScanCodeRoots, false);
 377 
 378   uint num_queues = heap->max_workers();
 379   for (uint i = 0; i < num_queues; ++i) {
 380     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 381     task_queue->initialize();
 382     _task_queues->register_queue(i, task_queue);
 383   }
 384 }
 385 
 386 ShenandoahTraversalGC::~ShenandoahTraversalGC() {
 387 }
 388 
 389 void ShenandoahTraversalGC::prepare_regions() {
 390   size_t num_regions = _heap->num_regions();
 391   ShenandoahMarkingContext* const ctx = _heap->marking_context();
 392   for (size_t i = 0; i < num_regions; i++) {
 393     ShenandoahHeapRegion* region = _heap->get_region(i);
 394     if (_heap->is_bitmap_slice_committed(region)) {
 395       if (_traversal_set.is_in(i)) {
 396         ctx->capture_top_at_mark_start(region);
 397         region->clear_live_data();
 398         assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
 399       } else {
 400         // Everything outside the traversal set is always considered live.
 401         ctx->reset_top_at_mark_start(region);
 402       }
 403     } else {
      // The FreeSet may contain uncommitted empty regions; once they are recommitted,
      // their TAMS may hold stale values, so reset them here.
 406       ctx->reset_top_at_mark_start(region);
 407     }
 408   }
 409 }
 410 
 411 void ShenandoahTraversalGC::prepare() {
 412   if (UseTLAB) {
 413     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_accumulate_stats);
 414     _heap->accumulate_statistics_tlabs();
 415   }
 416 
 417   {
 418     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
 419     _heap->make_parsable(true);
 420   }
 421 
 422   if (UseTLAB) {
 423     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
 424     _heap->resize_tlabs();
 425   }
 426 
 427   assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
 428   assert(!_heap->marking_context()->is_complete(), "should not be complete");
 429 
 430   // About to choose the collection set, make sure we know which regions are pinned.
 431   {
 432     ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_prepare_sync_pinned);
 433     _heap->sync_pinned_region_status();
 434   }
 435 
 436   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 437   {
 438     ShenandoahHeapLocker lock(_heap->lock());
 439 
 440     collection_set->clear();
 441     assert(collection_set->count() == 0, "collection set not clear");
 442 
 443     // Find collection set
 444     _heap->heuristics()->choose_collection_set(collection_set);
 445     prepare_regions();
 446 
 447     // Rebuild free set
 448     _heap->free_set()->rebuild();
 449   }
 450 
 451   log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "%s, " SIZE_FORMAT "%s CSet, " SIZE_FORMAT " CSet regions",
 452                      byte_size_in_proper_unit(collection_set->garbage()),   proper_unit_for_byte_size(collection_set->garbage()),
 453                      byte_size_in_proper_unit(collection_set->live_data()), proper_unit_for_byte_size(collection_set->live_data()),
 454                      collection_set->count());
 455 }
 456 
 457 void ShenandoahTraversalGC::init_traversal_collection() {
 458   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");
 459 
 460   if (ShenandoahVerify) {
 461     _heap->verifier()->verify_before_traversal();
 462   }
 463 
 464   if (VerifyBeforeGC) {
 465     Universe::verify();
 466   }
 467 
 468   {
 469     ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
 470     prepare();
 471   }
 472 
 473   _heap->set_concurrent_traversal_in_progress(true);
 474 
 475   bool process_refs = _heap->process_references();
 476   if (process_refs) {
 477     ReferenceProcessor* rp = _heap->ref_processor();
 478     rp->enable_discovery(true /*verify_no_refs*/, true);
 479     rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
 480   }
 481 
 482   {
 483     ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
 484     assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
 485     TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
 488     {
 489       COMPILER2_PRESENT(DerivedPointerTable::clear());
 490       uint nworkers = _heap->workers()->active_workers();
 491       task_queues()->reserve(nworkers);
 492       ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
 493       ShenandoahInitTraversalCollectionTask traversal_task(&rp);
 494       _heap->workers()->run_task(&traversal_task);
 495       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 496     }
 499     if (_heap->unload_classes()) {
 500       COMPILER2_PRESENT(DerivedPointerTable::clear());
 501       uint nworkers = _heap->workers()->active_workers();
 502       task_queues()->reserve(nworkers);
 503       ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_weak_traversal_gc_work);
 504       ShenandoahWeakInitTraversalCollectionTask task(&rp);
 505       _heap->workers()->run_task(&task);
 506       COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 507     }
 508   }
 509 
 510   if (ShenandoahPacing) {
 511     _heap->pacer()->setup_for_traversal();
 512   }
 513 }
 514 
 515 void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t) {
 516   ShenandoahObjToScanQueue* q = task_queues()->queue(w);
 517 
 518   // Initialize live data.
 519   jushort* ld = _heap->get_liveness_cache(w);
 520 
 521   ReferenceProcessor* rp = NULL;
 522   if (_heap->process_references()) {
 523     rp = _heap->ref_processor();
 524   }
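  // Pick the traversal closure variant that matches the current mode (degenerated
  // or not, class unloading on or off, string dedup on or off), then run the
  // templated work loop with it.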
 525   {
 526     if (!_heap->is_degenerated_gc_in_progress()) {
 527       if (_heap->unload_classes()) {
 528         if (ShenandoahStringDedup::is_enabled()) {
 529           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 530           ShenandoahTraversalMetadataDedupClosure cl(q, rp, dq);
 531           main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t);
 532         } else {
 533           ShenandoahTraversalMetadataClosure cl(q, rp);
 534           main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t);
 535         }
 536       } else {
 537         if (ShenandoahStringDedup::is_enabled()) {
 538           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 539           ShenandoahTraversalDedupClosure cl(q, rp, dq);
 540           main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t);
 541         } else {
 542           ShenandoahTraversalClosure cl(q, rp);
 543           main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t);
 544         }
 545       }
 546     } else {
 547       if (_heap->unload_classes()) {
 548         if (ShenandoahStringDedup::is_enabled()) {
 549           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 550           ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp, dq);
 551           main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t);
 552         } else {
 553           ShenandoahTraversalMetadataDegenClosure cl(q, rp);
 554           main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t);
 555         }
 556       } else {
 557         if (ShenandoahStringDedup::is_enabled()) {
 558           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 559           ShenandoahTraversalDedupDegenClosure cl(q, rp, dq);
 560           main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t);
 561         } else {
 562           ShenandoahTraversalDegenClosure cl(q, rp);
 563           main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t);
 564         }
 565       }
 566     }
 567   }
 568 
 569   _heap->flush_liveness_cache(w);
 570 }
 571 
 572 template <class T>
 573 void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator) {
 574   ShenandoahObjToScanQueueSet* queues = task_queues();
 575   ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 576   ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();
 577 
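  // Number of tasks to process before breaking out to re-check for cancellation
  // and to drain any newly completed SATB buffers.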
 578   uintx stride = ShenandoahMarkLoopStride;
 579 
 580   ShenandoahMarkTask task;
 581 
 582   // Process outstanding queues, if any.
 583   q = queues->claim_next();
 584   while (q != NULL) {
 585     if (_heap->cancelled_gc()) {
 586       return;
 587     }
 588 
 589     for (uint i = 0; i < stride; i++) {
 590       if (q->pop(task)) {
 591         conc_mark->do_task<T>(q, cl, live_data, &task);
 592       } else {
 593         assert(q->is_empty(), "Must be empty");
 594         q = queues->claim_next();
 595         break;
 596       }
 597     }
 598   }
 599 
 600   if (_heap->cancelled_gc()) return;
 601 
  // Normal loop: alternate between draining the local queue, stealing from other
  // workers' queues, and draining completed SATB buffers; offer termination once
  // no work is left.
 603   q = queues->queue(worker_id);
 604 
 605   ShenandoahTraversalSATBBufferClosure drain_satb(q);
 606   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 607 
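  // Seed for the randomized victim selection used by the work-stealing below.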
 608   int seed = 17;
 609 
 610   while (true) {
 611     if (_heap->cancelled_gc()) return;
 612 
 613     while (satb_mq_set.completed_buffers_num() > 0) {
 614       satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
 615     }
 616 
 617     uint work = 0;
 618     for (uint i = 0; i < stride; i++) {
 619       if (q->pop(task) ||
 620           queues->steal(worker_id, &seed, task)) {
 621         conc_mark->do_task<T>(q, cl, live_data, &task);
 622         work++;
 623       } else {
 624         break;
 625       }
 626     }
 627 
 628     if (work == 0) {
 629       // No more work, try to terminate
 630       ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 631       ShenandoahTerminationTimingsTracker term_tracker(worker_id);
 632       ShenandoahTerminatorTerminator tt(_heap);
 633 
 634       if (terminator->offer_termination(&tt)) return;
 635     }
 636   }
 637 }
 638 
 639 void ShenandoahTraversalGC::concurrent_traversal_collection() {
 640   ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
 641   if (!_heap->cancelled_gc()) {
 642     uint nworkers = _heap->workers()->active_workers();
 643     task_queues()->reserve(nworkers);
 644     ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);
 645 
 646     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 647     ShenandoahConcurrentTraversalCollectionTask task(&terminator);
 648     _heap->workers()->run_task(&task);
 649   }
 650 
 651   if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
 652     preclean_weak_refs();
 653   }
 654 }
 655 
 656 void ShenandoahTraversalGC::final_traversal_collection() {
 657   _heap->make_parsable(true);
 658 
 659   if (!_heap->cancelled_gc()) {
 660     COMPILER2_PRESENT(DerivedPointerTable::clear());
 661     ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
 662     uint nworkers = _heap->workers()->active_workers();
 663     task_queues()->reserve(nworkers);
 664 
 665     // Finish traversal
 666     {
 667       SharedHeap::StrongRootsScope scope(_heap, true);
 668       ShenandoahPreFinalTraversalCollectionTask task;
 669       _heap->workers()->run_task(&task);
 670     }
 671 
 672     ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
 673     ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);
 674     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 675     ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
 676     _heap->workers()->run_task(&task);
 677     COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 678   }
 679 
 680   if (!_heap->cancelled_gc() && _heap->process_references()) {
 681     weak_refs_work();
 682   }
 683 
 684   if (!_heap->cancelled_gc()) {
 685     fixup_roots();
 686     if (_heap->unload_classes()) {
 687       _heap->unload_classes_and_cleanup_tables(false);
 688     } else {
 689       ShenandoahIsAliveSelector alive;
 690       StringTable::unlink(alive.is_alive_closure());
 691     }
 692   }
 693 
 694   if (!_heap->cancelled_gc()) {
 695     assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
 696     TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
 697     TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
 698 
 699     // No more marking expected
 700     _heap->mark_complete_marking_context();
 701 
 702     // Resize metaspace
 703     MetaspaceGC::compute_new_size();
 704 
    // Make sure that the pinned region status is up to date: newly pinned regions must not
    // be trashed, while newly unpinned regions may now be trashed.
 707     {
 708       ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_sync_pinned);
 709       _heap->sync_pinned_region_status();
 710     }
 711 
    // Still good? We can now trash the cset and do the final verification.
 713     {
 714       ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
 715       ShenandoahHeapLocker lock(_heap->lock());
 716 
      // Trash everything: clear out the immediately reclaimable garbage regions.
 719       size_t num_regions = _heap->num_regions();
 720 
 721       ShenandoahHeapRegionSet* traversal_regions = traversal_set();
 722       ShenandoahFreeSet* free_regions = _heap->free_set();
 723       ShenandoahMarkingContext* const ctx = _heap->marking_context();
 724       free_regions->clear();
 725       for (size_t i = 0; i < num_regions; i++) {
 726         ShenandoahHeapRegion* r = _heap->get_region(i);
 727         bool not_allocated = ctx->top_at_mark_start(r) == r->top();
 728 
 729         bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
 730         if (r->is_humongous_start() && candidate) {
 731           // Trash humongous.
 732           HeapWord* humongous_obj = r->bottom();
 733           assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
 734           r->make_trash_immediate();
 735           while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
 736             i++;
 737             r = _heap->get_region(i);
 738             assert(r->is_humongous_continuation(), "must be humongous continuation");
 739             r->make_trash_immediate();
 740           }
 741         } else if (!r->is_empty() && candidate) {
 742           // Trash regular.
 743           assert(!r->is_humongous(), "handled above");
 744           assert(!r->is_trash(), "must not already be trashed");
 745           r->make_trash_immediate();
 746         }
 747       }
 748       _heap->collection_set()->clear();
 749       _heap->free_set()->rebuild();
 750       reset();
 751     }
 752 
 753     assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
 754     _heap->set_concurrent_traversal_in_progress(false);
 755     assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");
 756 
 757     if (ShenandoahVerify) {
 758       _heap->verifier()->verify_after_traversal();
 759     }
 760 
 761     if (VerifyAfterGC) {
 762       Universe::verify();
 763     }
 764   }
 765 }
 766 
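// Updates root slots that still point at forwarded (already evacuated) objects so that
// they reference the forwardees; no evacuation happens at this point anymore.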
 767 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 768 private:
 769   template <class T>
 770   inline void do_oop_work(T* p) {
 771     T o = oopDesc::load_heap_oop(p);
 772     if (!oopDesc::is_null(o)) {
 773       oop obj = oopDesc::decode_heap_oop_not_null(o);
 774       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 775       if (obj != forw) {
 776         oopDesc::encode_store_heap_oop_not_null(p, forw);
 777       }
 778     }
 779   }
 780 
 781 public:
 782   inline void do_oop(oop* p) { do_oop_work(p); }
 783   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 784 };
 785 
 786 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 787 private:
 788   ShenandoahRootProcessor* _rp;
 789 
 790 public:
 791   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
 792     AbstractGangTask("Shenandoah traversal fix roots"),
 793     _rp(rp) {
 794     assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
 795   }
 796 
 797   void work(uint worker_id) {
 798     ShenandoahParallelWorkerSession worker_session(worker_id);
 799     ShenandoahTraversalFixRootsClosure cl;
 800     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 801     CLDToOopClosure cldCl(&cl);
 802     _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
 803   }
 804 };
 805 
 806 void ShenandoahTraversalGC::fixup_roots() {
 807   COMPILER2_PRESENT(DerivedPointerTable::clear());
 808   ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
 809   ShenandoahTraversalFixRootsTask update_roots_task(&rp);
 810   _heap->workers()->run_task(&update_roots_task);
 811   COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
 812 }
 813 
 814 void ShenandoahTraversalGC::reset() {
 815   _task_queues->clear();
 816 }
 817 
 818 ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
 819   return _task_queues;
 820 }
 821 
 822 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 823 private:
 824   ShenandoahHeap* const _heap;
 825 public:
 826   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 827   virtual bool should_return() { return _heap->cancelled_gc(); }
 828 };
 829 
 830 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 831 public:
 832   void do_void() {
 833     ShenandoahHeap* sh = ShenandoahHeap::heap();
 834     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 835     assert(sh->process_references(), "why else would we be here?");
 836     ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
 837     shenandoah_assert_rp_isalive_installed();
 838     traversal_gc->main_loop((uint) 0, &terminator);
 839   }
 840 };
 841 
 842 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 843 private:
 844   ShenandoahObjToScanQueue* _queue;
 845   Thread* _thread;
 846   ShenandoahTraversalGC* _traversal_gc;
 847   ShenandoahMarkingContext* const _mark_context;
 848 
 849   template <class T>
 850   inline void do_oop_work(T* p) {
 851     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
 852   }
 853 
 854 public:
 855   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 856     _queue(q), _thread(Thread::current()),
 857     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 858     _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 859 
 860   void do_oop(narrowOop* p) { do_oop_work(p); }
 861   void do_oop(oop* p)       { do_oop_work(p); }
 862 };
 863 
 864 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 865 private:
 866   ShenandoahObjToScanQueue* _queue;
 867   Thread* _thread;
 868   ShenandoahTraversalGC* _traversal_gc;
 869   ShenandoahMarkingContext* const _mark_context;
 870 
 871   template <class T>
 872   inline void do_oop_work(T* p) {
 873     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
 874   }
 875 
 876 public:
 877   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 878           _queue(q), _thread(Thread::current()),
 879           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 880           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 881 
 882   void do_oop(narrowOop* p) { do_oop_work(p); }
 883   void do_oop(oop* p)       { do_oop_work(p); }
 884 };
 885 
 886 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
 887 private:
 888   ShenandoahObjToScanQueue* _queue;
 889   Thread* _thread;
 890   ShenandoahTraversalGC* _traversal_gc;
 891   ShenandoahMarkingContext* const _mark_context;
 892 
 893   template <class T>
 894   inline void do_oop_work(T* p) {
 895     ShenandoahEvacOOMScope evac_scope;
 896     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* atomic update */>(p, _thread, _queue, _mark_context);
 897   }
 898 
 899 public:
 900   ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 901           _queue(q), _thread(Thread::current()),
 902           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 903           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 904 
 905   void do_oop(narrowOop* p) { do_oop_work(p); }
 906   void do_oop(oop* p)       { do_oop_work(p); }
 907 };
 908 
 909 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
 910 private:
 911   ShenandoahObjToScanQueue* _queue;
 912   Thread* _thread;
 913   ShenandoahTraversalGC* _traversal_gc;
 914   ShenandoahMarkingContext* const _mark_context;
 915 
 916   template <class T>
 917   inline void do_oop_work(T* p) {
 918     ShenandoahEvacOOMScope evac_scope;
 919     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* atomic update */>(p, _thread, _queue, _mark_context);
 920   }
 921 
 922 public:
 923   ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 924           _queue(q), _thread(Thread::current()),
 925           _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 926           _mark_context(ShenandoahHeap::heap()->marking_context()) {}
 927 
 928   void do_oop(narrowOop* p) { do_oop_work(p); }
 929   void do_oop(oop* p)       { do_oop_work(p); }
 930 };
 931 
 932 class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
 933 private:
 934   ReferenceProcessor* _rp;
 935 
 936 public:
 937   ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
 938           AbstractGangTask("Precleaning task"),
 939           _rp(rp) {}
 940 
 941   void work(uint worker_id) {
 942     assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
 943     ShenandoahParallelWorkerSession worker_session(worker_id);
 944     ShenandoahEvacOOMScope oom_evac_scope;
 945 
 946     ShenandoahHeap* sh = ShenandoahHeap::heap();
 947 
 948     ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);
 949 
 950     ShenandoahForwardedIsAliveClosure is_alive;
 951     ShenandoahTraversalCancelledGCYieldClosure yield;
 952     ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 953     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
 954     ResourceMark rm;
 955     _rp->preclean_discovered_references(&is_alive, &keep_alive,
 956                                         &complete_gc, &yield,
 957                                         NULL, sh->shenandoah_policy()->tracer()->gc_id());
 958   }
 959 };
 960 
 961 void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. It filters out the references whose referents
  // are alive. Note that the ReferenceProcessor already filters these out on
  // reference discovery, and that is where the bulk of the work is done. This
  // phase processes the leftovers that missed the initial filtering, i.e.
  // references whose referent was marked alive after the reference was
  // discovered by the RP.
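  // Roughly, what the ReferenceProcessor does for us during precleaning (a sketch,
  // not code from this file):
  //
  //   for each discovered Reference ref:
  //     if (referent(ref) == NULL || is_alive(referent(ref))) {
  //       keep_alive(referent slot of ref); drop ref from the discovered list;
  //     } else {
  //       keep ref for the STW reference processing in final-traversal;
  //     }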
 968 
 969   assert(_heap->process_references(), "sanity");
 970   assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");
 971 
  // Shortcut if no references were discovered, to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
 975 
 976   shenandoah_assert_rp_isalive_not_installed();
 977   ShenandoahForwardedIsAliveClosure is_alive;
 978   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
 979 
 980   assert(task_queues()->is_empty(), "Should be empty");
 981 
 982   // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
 983   // queues and other goodies. When upstream ReferenceProcessor starts supporting
 984   // parallel precleans, we can extend this to more threads.
 985   ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);
 986 
 987   WorkGang* workers = _heap->workers();
 988   uint nworkers = workers->active_workers();
 989   assert(nworkers == 1, "This code uses only a single worker");
 990   task_queues()->reserve(nworkers);
 991 
 992   ShenandoahTraversalPrecleanTask task(rp);
 993   workers->run_task(&task);
 994 
 995   assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
 996 }
 997 
 998 // Weak Reference Closures
 999 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
1000   uint _worker_id;
1001   ShenandoahTaskTerminator* _terminator;
1002   bool _reset_terminator;
1003 
1004 public:
1005   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
1006     _worker_id(worker_id),
1007     _terminator(t),
1008     _reset_terminator(reset_terminator) {
1009   }
1010 
1011   void do_void() {
1012     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1013 
1014     ShenandoahHeap* sh = ShenandoahHeap::heap();
1015     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
1016     assert(sh->process_references(), "why else would we be here?");
1017     shenandoah_assert_rp_isalive_installed();
1018 
1019     traversal_gc->main_loop(_worker_id, _terminator);
1020 
1021     if (_reset_terminator) {
1022       _terminator->reset_for_reuse();
1023     }
1024   }
1025 };
1026 
1027 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
1028   uint _worker_id;
1029   ShenandoahTaskTerminator* _terminator;
1030   bool _reset_terminator;
1031 
1032 public:
1033   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
1034           _worker_id(worker_id),
1035           _terminator(t),
1036           _reset_terminator(reset_terminator) {
1037   }
1038 
1039   void do_void() {
1040     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1041 
1042     ShenandoahHeap* sh = ShenandoahHeap::heap();
1043     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
1044     assert(sh->process_references(), "why else would we be here?");
1045     shenandoah_assert_rp_isalive_installed();
1046 
1047     ShenandoahEvacOOMScope evac_scope;
1048     traversal_gc->main_loop(_worker_id, _terminator);
1049 
1050     if (_reset_terminator) {
1051       _terminator->reset_for_reuse();
1052     }
1053   }
1054 };
1055 
1056 void ShenandoahTraversalGC::weak_refs_work() {
1057   assert(_heap->process_references(), "sanity");
1058 
1059   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
1060 
1061   ShenandoahGCPhase phase(phase_root);
1062 
1063   ReferenceProcessor* rp = _heap->ref_processor();
1064 
  // NOTE: We cannot shortcut on has_discovered_references() here, because we would
  // then miss marking JNI weak refs; see the implementation of
  // ReferenceProcessor::process_discovered_references.
1068   weak_refs_work_doit();
1069 
1070   rp->verify_no_references_recorded();
1071   assert(!rp->discovery_enabled(), "Post condition");
1072 
1073 }
1074 
1075 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
1076 private:
1077   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1078   ShenandoahTaskTerminator* _terminator;
1079 
1080 public:
1081   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1082                                       ShenandoahTaskTerminator* t) :
1083     AbstractGangTask("Process reference objects in parallel"),
1084     _proc_task(proc_task),
1085     _terminator(t) {
1086   }
1087 
1088   void work(uint worker_id) {
1089     ShenandoahEvacOOMScope oom_evac_scope;
1090     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1091     ShenandoahHeap* heap = ShenandoahHeap::heap();
1092     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1093 
1094     ShenandoahForwardedIsAliveClosure is_alive;
1095     if (!heap->is_degenerated_gc_in_progress()) {
1096       ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1097       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1098     } else {
1099       ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1100       _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1101     }
1102   }
1103 };
1104 
1105 class ShenandoahTraversalRefEnqueueTaskProxy : public AbstractGangTask {
1106 private:
1107   AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;
1108 
1109 public:
1110   ShenandoahTraversalRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
1111     AbstractGangTask("Enqueue reference objects in parallel"),
1112     _enqueue_task(enqueue_task) {
1113   }
1114 
1115   void work(uint worker_id) {
1116     _enqueue_task.work(worker_id);
1117   }
1118 };
1119 
1120 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1121 private:
1122   WorkGang* _workers;
1123 
1124 public:
1125   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}
1126 
1127   // Executes a task using worker threads.
1128   void execute(ProcessTask& task) {
1129     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1130 
1131     // Shortcut execution if task is empty.
1132     // This should be replaced with the generic ReferenceProcessor shortcut,
1133     // see JDK-8181214, JDK-8043575, JDK-6938732.
1134     if (task.is_empty()) {
1135       return;
1136     }
1137 
1138     ShenandoahHeap* heap = ShenandoahHeap::heap();
1139     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1140     uint nworkers = _workers->active_workers();
1141     traversal_gc->task_queues()->reserve(nworkers);
1142 
1143     ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1144     ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1145     _workers->run_task(&proc_task_proxy);
1146   }
1147 
1148   void execute(EnqueueTask& task) {
1149     ShenandoahTraversalRefEnqueueTaskProxy enqueue_task_proxy(task);
1150     _workers->run_task(&enqueue_task_proxy);
1151   }
1152 };
1153 
1154 void ShenandoahTraversalGC::weak_refs_work_doit() {
1155   ReferenceProcessor* rp = _heap->ref_processor();
1156 
1157   ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
1158   ShenandoahPhaseTimings::Phase phase_enqueue = ShenandoahPhaseTimings::weakrefs_enqueue;
1159 
1160   shenandoah_assert_rp_isalive_not_installed();
1161   ShenandoahForwardedIsAliveClosure is_alive;
1162   ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);
1163 
1164   WorkGang* workers = _heap->workers();
1165   uint nworkers = workers->active_workers();
1166 
1167   rp->setup_policy(_heap->collector_policy()->should_clear_all_soft_refs());
1168   rp->set_active_mt_degree(nworkers);
1169 
1170   assert(task_queues()->is_empty(), "Should be empty");
1171 
  // The complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in RP. They share queue 0 for tracking work, which
  // simplifies the implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
1176   uint serial_worker_id = 0;
1177   ShenandoahTaskTerminator terminator(1, task_queues());
1178   ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
1179   ShenandoahTraversalRefProcTaskExecutor executor(workers);
1180 
  {
    // Account reference processing under its own timing phase (phase_process),
    // mirroring the enqueue phase below.
    ShenandoahGCPhase phase(phase_process);

    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        NULL, _heap->shenandoah_policy()->tracer()->gc_id());
    } else {
      ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        NULL, _heap->shenandoah_policy()->tracer()->gc_id());
    }
  }
1192   assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
1193 
1194   {
1195     ShenandoahGCPhase phase(phase_enqueue);
1196     rp->enqueue_discovered_references(&executor);
1197   }
1198 }