1 /*
   2  * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/gcTraceTime.inline.hpp"
  27 #include "gc/shared/markBitMap.inline.hpp"
  28 #include "gc/shared/workgroup.hpp"
  29 #include "gc/shared/taskqueue.inline.hpp"
  30 #include "gc/shared/weakProcessor.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  32 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
  33 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  36 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  37 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.hpp"
  39 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  42 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  43 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  44 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  45 #include "gc/shenandoah/shenandoahStrDedupQueue.inline.hpp"
  46 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  47 #include "gc/shenandoah/shenandoahUtils.hpp"
  48 #include "gc/shenandoah/shenandoahVerifier.hpp"
  49 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  50 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  51 
  52 #include "memory/iterator.hpp"
  53 
  54 /**
  55  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  56  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  57  * is incremental-update-based.
  58  *
  59  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  60  * several reasons:
  61  * - We will not reclaim them in this cycle anyway, because they are not in the
  62  *   cset
  63  * - It makes up for the bulk of work during final-pause
  64  * - It also shortens the concurrent cycle because we don't need to
  65  *   pointlessly traverse through newly allocated objects.
  66  * - As a nice side-effect, it solves the I-U termination problem (mutators
  67  *   cannot outrun the GC by allocating like crazy)
  68  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  70  *   achieves the same, but without extra barriers. I think the effect of
  71  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  72  *   particular, we will not see the head of a completely new long linked list
  73  *   in final-pause and end up traversing huge chunks of the heap there.
  74  * - We don't need to see/update the fields of new objects either, because they
  75  *   are either still null, or anything that's been stored into them has been
  76  *   evacuated+enqueued before (and will thus be treated later).
  77  *
  78  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  80  *
  81  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  83  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  84  *   them for cset. This means that we need to protect such regions from
 *   getting accidentally trashed at the end of traversal cycle. This is why I
  86  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  87  *   code.
  88  * - We *need* to traverse through evacuated objects. Those objects are
  89  *   pre-existing, and any references in them point to interesting objects that
  90  *   we need to see. We also want to count them as live, because we just
  91  *   determined that they are alive :-) I achieve this by upping TAMS
  92  *   concurrently for every gclab/gc-shared alloc before publishing the
  93  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  95  */
  96 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  97 private:
  98   ShenandoahObjToScanQueue* _queue;
  99   ShenandoahTraversalGC* _traversal_gc;
 100   ShenandoahHeap* _heap;
 101   ShenandoahHeapRegionSet* _traversal_set;
 102 
 103 public:
 104   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 105     _queue(q), _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 106     _heap(ShenandoahHeap::heap()),
 107     _traversal_set(ShenandoahHeap::heap()->traversal_gc()->traversal_set())
 108  { }
 109 
 110   void do_buffer(void** buffer, size_t size) {
 111     for (size_t i = 0; i < size; ++i) {
 112       oop* p = (oop*) &buffer[i];
 113       oop obj = RawAccess<>::oop_load(p);
 114       shenandoah_assert_not_forwarded(p, obj);
 115       if (_traversal_set->is_in((HeapWord*) obj) && !_heap->is_marked_next(obj) && _heap->mark_next(obj)) {
 116         _queue->push(ShenandoahMarkTask(obj));
 117       }
 118     }
 119   }
 120 };
 121 
 122 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 123   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 124 
 125  public:
 126   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 127     _satb_cl(satb_cl) {}
 128 
 129   void do_thread(Thread* thread) {
 130     if (thread->is_Java_thread()) {
 131       JavaThread* jt = (JavaThread*)thread;
 132       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 133     } else if (thread->is_VM_thread()) {
 134       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 135     }
 136   }
 137 };
 138 
 139 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 140 // and remark them later during final-traversal.
 141 class ShenandoahMarkCLDClosure : public CLDClosure {
 142 private:
 143   OopClosure* _cl;
 144 public:
 145   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 146   void do_cld(ClassLoaderData* cld) {
 147     cld->oops_do(_cl, true, true);
 148   }
 149 };
 150 
 151 // Like CLDToOopClosure, but only process modified CLDs
 152 class ShenandoahRemarkCLDClosure : public CLDClosure {
 153 private:
 154   OopClosure* _cl;
 155 public:
 156   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 157   void do_cld(ClassLoaderData* cld) {
 158     if (cld->has_modified_oops()) {
 159       cld->oops_do(_cl, true, true);
 160     }
 161   }
 162 };
 163 
// Pause-time task that seeds the traversal: scans GC roots and pushes the
// discovered objects onto the per-worker scan queues, which are drained
// later by the concurrent phase.
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahHeap* _heap;
public:
  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      // With reference processing enabled, weak roots (second argument) are
      // left to the reference processor (NULL); otherwise they are scanned
      // like strong roots. With class unloading only strong roots are
      // scanned here; weak CLDs and threads are not visited (NULL slots).
      if (unload_classes) {
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &code_cl, NULL, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
      }
    }
  }
};
 199 
 200 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 201 private:
 202   ParallelTaskTerminator* _terminator;
 203   ShenandoahHeap* _heap;
 204 public:
 205   ShenandoahConcurrentTraversalCollectionTask(ParallelTaskTerminator* terminator) :
 206     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 207     _terminator(terminator),
 208     _heap(ShenandoahHeap::heap()) {}
 209 
 210   void work(uint worker_id) {
 211     ShenandoahEvacOOMScope oom_evac_scope;
 212     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 213     ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
 214     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 215 
 216     // Drain all outstanding work in queues.
 217     traversal_gc->main_loop(worker_id, _terminator, true);
 218   }
 219 };
 220 
// Pause-time task that finishes the traversal: drains leftover SATB buffers,
// rescans GC roots to catch updates made by mutators since the initial scan,
// and then drains the marking queues to completion.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ParallelTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ParallelTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 2: Process ordinary GC roots.
    // Degenerated GC takes the else-branch with the *Degen closure variants.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      // Thread closure drains remaining per-thread SATB buffers (see Step 1).
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Only modified weak CLDs need to be re-visited on remark.
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
      }
    } else {
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 3: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};
 292 
 293 void ShenandoahTraversalGC::flush_liveness(uint worker_id) {
 294   jushort* ld = get_liveness(worker_id);
 295   for (uint i = 0; i < _heap->num_regions(); i++) {
 296     ShenandoahHeapRegion* r = _heap->get_region(i);
 297     jushort live = ld[i];
 298     if (live > 0) {
 299       r->increase_live_data_gc_words(live);
 300       ld[i] = 0;
 301     }
 302   }
 303 }
 304 
// Sets up per-worker work-stealing queues and per-worker liveness scratch
// arrays. NOTE: _root_regions_iterator is initialized from _root_regions,
// so declaration order of those members must be preserved.
ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
  _heap(heap),
  _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
  _traversal_set(new ShenandoahHeapRegionSet()),
  _root_regions(new ShenandoahHeapRegionSet()),
  _root_regions_iterator(_root_regions->iterator()),
  _matrix(heap->connection_matrix()) {

  // One scan queue per potential worker.
  uint num_queues = heap->max_workers();
  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  // Per-worker liveness accumulators, one jushort per region; they are
  // drained back into the regions by flush_liveness().
  uint workers = heap->max_workers();
  _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
  for (uint worker = 0; worker < workers; worker++) {
     _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, num_regions, mtGC);
  }

}
 327 
 328 ShenandoahTraversalGC::~ShenandoahTraversalGC() {
 329 }
 330 
// Set up TAMS and liveness for every region with a committed bitmap slice,
// and prepare root regions for safe concurrent iteration. See the file
// header comment for the TAMS protocol.
void ShenandoahTraversalGC::prepare_regions() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t num_regions = heap->num_regions();
  ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();

  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set->is_in(i)) {
        // Traversal regions: TAMS at current top, so anything allocated
        // during the cycle is implicitly marked; liveness is recomputed
        // from scratch by this cycle.
        heap->set_next_top_at_mark_start(region->bottom(), region->top());
        region->clear_live_data();
        assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        heap->set_next_top_at_mark_start(region->bottom(), region->bottom());
      }
      if (_root_regions->is_in(i)) {
        assert(!region->in_collection_set(), "roots must not overlap with cset");
        matrix->clear_region_outbound(i);
        // Since root region can be allocated at, we should bound the scans
        // in it at current top. Otherwise, one thread may evacuate objects
        // to that root region, while another would try to scan newly evac'ed
        // objects under the race.
        region->set_concurrent_iteration_safe_limit(region->top());
      }
    }
  }
}
 359 
 360 void ShenandoahTraversalGC::prepare() {
 361   _heap->collection_set()->clear();
 362   assert(_heap->collection_set()->count() == 0, "collection set not clear");
 363 
 364   _heap->make_tlabs_parsable(true);
 365 
 366   assert(_heap->is_next_bitmap_clear(), "need clean mark bitmap");
 367 
 368   ShenandoahFreeSet* free_set = _heap->free_set();
 369   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 370 
 371   // Find collection set
 372   _heap->shenandoahPolicy()->choose_collection_set(collection_set);
 373   prepare_regions();
 374 
 375   // Rebuild free set
 376   free_set->rebuild();
 377 
 378   log_info(gc,ergo)("Got "SIZE_FORMAT" collection set regions and "SIZE_FORMAT" root set regions", collection_set->count(), _root_regions->count());
 379 }
 380 
// Safepoint operation that starts a traversal cycle: verify, prepare the
// heap, enable reference discovery, and seed the worker queues from the GC
// roots.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(false);
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      // NOTE(review): the terminators below are constructed but never passed
      // to the init task, which takes only the root processor — this looks
      // like dead code; confirm terminator construction has no required side
      // effects before collapsing the two branches.
      if (UseShenandoahOWST) {
        ShenandoahTaskTerminator terminator(nworkers, task_queues());
        ShenandoahInitTraversalCollectionTask traversal_task(&rp);
        _heap->workers()->run_task(&traversal_task);
      } else {
        ParallelTaskTerminator terminator(nworkers, task_queues());
        ShenandoahInitTraversalCollectionTask traversal_task(&rp);
        _heap->workers()->run_task(&traversal_task);
      }
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }

  // Reset the root-region iterator; workers claim regions from it during
  // main_loop_work().
  _root_regions_iterator = _root_regions->iterator();
}
 438 
 439 void ShenandoahTraversalGC::main_loop(uint worker_id, ParallelTaskTerminator* terminator, bool do_satb) {
 440   if (do_satb) {
 441     main_loop_prework<true>(worker_id, terminator);
 442   } else {
 443     main_loop_prework<false>(worker_id, terminator);
 444   }
 445 }
 446 
// Select the statically-specialized traversal closure for the four runtime
// dimensions — connection matrix, degenerated GC, class unloading, string
// dedup — and hand off to main_loop_work(). Each leaf instantiates
// main_loop_work() with a concrete closure type, trading code size for
// branch-free inner loops.
template <bool DO_SATB>
void ShenandoahTraversalGC::main_loop_prework(uint w, ParallelTaskTerminator* t) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = get_liveness(w);
  Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  if (UseShenandoahMatrix) {
    // Matrix closure variants also record inter-region connections.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
          ShenandoahTraversalMetadataDedupMatrixClosure cl(q, rp, dq);
          main_loop_work<ShenandoahTraversalMetadataDedupMatrixClosure, DO_SATB>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataMatrixClosure, DO_SATB>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
          ShenandoahTraversalDedupMatrixClosure cl(q, rp, dq);
          main_loop_work<ShenandoahTraversalDedupMatrixClosure, DO_SATB>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMatrixClosure, DO_SATB>(&cl, ld, w, t);
        }
      }
    } else {
      // Degenerated GC uses the *Degen closure variants.
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
          ShenandoahTraversalMetadataDedupDegenMatrixClosure cl(q, rp, dq);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
          ShenandoahTraversalDedupDegenMatrixClosure cl(q, rp, dq);
          main_loop_work<ShenandoahTraversalDedupDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
        }
      }
    }
  } else {
    // Same dispatch without matrix recording.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
          ShenandoahTraversalMetadataDedupClosure cl(q, rp, dq);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure, DO_SATB>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure, DO_SATB>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
          ShenandoahTraversalDedupClosure cl(q, rp, dq);
          main_loop_work<ShenandoahTraversalDedupClosure, DO_SATB>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure, DO_SATB>(&cl, ld, w, t);
        }
      }
    } else {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp, dq);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure, DO_SATB>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure, DO_SATB>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
          ShenandoahTraversalDedupDegenClosure cl(q, rp, dq);
          main_loop_work<ShenandoahTraversalDedupDegenClosure, DO_SATB>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure, DO_SATB>(&cl, ld, w, t);
        }
      }
    }
  }
  // Publish per-region liveness gathered by this worker.
  flush_liveness(w);

}
 547 
// Worker marking loop. Three phases: (1) drain queues left over from the
// previous phase, (2) scan claimed root regions, (3) the normal mark loop
// with work stealing and (when DO_SATB) SATB buffer polling. Cancellation
// is checked once per 'stride' tasks; on cancellation all workers must still
// rendezvous through the termination protocol before returning.
template <class T, bool DO_SATB>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator* terminator) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrentMark();

  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_concgc_and_yield()) {
      // Leave the evac-OOM scope, then spin on the cancelled-terminator
      // until every worker has arrived.
      ShenandoahCancelledTerminatorTerminator tt;
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task)) {
        conc_mark->do_task<T, true>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator)) return;

  // Step 2: Process all root regions.
  // TODO: Interleave this in the normal mark loop below.
  ShenandoahHeapRegion* r = _root_regions_iterator.claim_next();
  while (r != NULL) {
    _heap->marked_object_oop_safe_iterate(r, cl);
    if (ShenandoahPacing) {
      _heap->pacer()->report_partial(r->get_live_data_words());
    }
    if (check_and_handle_cancelled_gc(terminator)) return;
    r = _root_regions_iterator.claim_next();
  }

  if (check_and_handle_cancelled_gc(terminator)) return;

  // Normal loop: drain own queue, poll SATB buffers (concurrent phase only),
  // steal from other queues, and offer termination when no work is found.
  q = queues->queue(worker_id);
  ShenandoahTraversalSATBBufferClosure satb_cl(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  // Seed for the work-stealing victim selection.
  int seed = 17;

  while (true) {
    if (check_and_handle_cancelled_gc(terminator)) return;

    for (uint i = 0; i < stride; i++) {
      // The SATB closure pushes onto q, so a successful drain is followed
      // by a pop_buffer() to get a task out of it.
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task) ||
          (DO_SATB && satb_mq_set.apply_closure_to_completed_buffer(&satb_cl) && q->pop_buffer(task)) ||
          queues->steal(worker_id, &seed, task)) {
        conc_mark->do_task<T, true>(q, cl, live_data, &task);
      } else {
        ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
        if (terminator->offer_termination()) return;
      }
    }
  }
}
 621 
 622 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator) {
 623   if (_heap->cancelled_concgc()) {
 624     ShenandoahCancelledTerminatorTerminator tt;
 625     ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 626     while (! terminator->offer_termination(&tt));
 627     return true;
 628   }
 629   return false;
 630 }
 631 
// Concurrent phase: workers drain the queues seeded at init, then weak
// references are optionally precleaned.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_concgc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    // The two branches differ only in the terminator implementation
    // (OWST vs. the default ParallelTaskTerminator).
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentTraversalCollectionTask traversal_task(&terminator);
      _heap->workers()->run_task(&traversal_task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentTraversalCollectionTask traversal_task(&terminator);
      _heap->workers()->run_task(&traversal_task);
    }
  }

  // Preclean weak refs under the evac-OOM scope, unless cancelled meanwhile.
  if (!_heap->cancelled_concgc() && ShenandoahPreclean && _heap->process_references()) {
    ShenandoahEvacOOMScope oom_evac_scope;
    preclean_weak_refs();
  }
}
 655 
// Pause-time finish of the traversal cycle: complete marking, process weak
// references, unload classes, trash reclaimable regions, and verify.
void ShenandoahTraversalGC::final_traversal_collection() {

  // Retire TLABs so region tops are accurate below.
  _heap->make_tlabs_parsable(true);

  if (!_heap->cancelled_concgc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalTraversalCollectionTask traversal_task(&rp, &terminator);
      _heap->workers()->run_task(&traversal_task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalTraversalCollectionTask traversal_task(&rp, &terminator);
      _heap->workers()->run_task(&traversal_task);
    }
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_concgc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_concgc() && _heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(false);
    // Class unloading may have left stale references in roots; fix them up.
    fixup_roots();
  }

  if (!_heap->cancelled_concgc()) {
    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // TAMS still at top means nothing was allocated here during the
        // cycle — see the file header note about protecting alloc regions.
        bool not_allocated = _heap->next_top_at_mark_start(r->bottom()) == r->top();

        // Candidates: traversal regions with no live data and no new allocations.
        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + BrooksPointer::word_size();
          assert(!_heap->is_marked_next(oop(humongous_obj)), "must not be marked");
          r->make_trash();
          // Trash the trailing continuation regions of the same object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_concgc(), "must not be cancelled when getting out here");
  }
}
 743 
 744 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 745 private:
 746 
 747   template <class T>
 748   inline void do_oop_work(T* p) {
 749     T o = RawAccess<>::oop_load(p);
 750     if (!CompressedOops::is_null(o)) {
 751       oop obj = CompressedOops::decode_not_null(o);
 752       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 753       if (!oopDesc::unsafe_equals(obj, forw)) {
 754         RawAccess<OOP_NOT_NULL>::oop_store(p, forw);
 755       }
 756     }
 757   }
 758 public:
 759   inline void do_oop(oop* p) { do_oop_work(p); }
 760   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 761 };
 762 
 763 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 764   ShenandoahRootProcessor* _rp;
 765 public:
 766 
 767   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
 768     AbstractGangTask("Shenandoah traversal fix roots"),
 769     _rp(rp)
 770   {
 771     // Nothing else to do.
 772   }
 773 
 774   void work(uint worker_id) {
 775     ShenandoahTraversalFixRootsClosure cl;
 776     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 777     CLDToOopClosure cldCl(&cl);
 778     _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
 779   }
 780 };
 781 
 782 void ShenandoahTraversalGC::fixup_roots() {
 783 #if defined(COMPILER2) || INCLUDE_JVMCI
 784   DerivedPointerTable::clear();
 785 #endif
 786   ShenandoahHeap* heap = ShenandoahHeap::heap();
 787   ShenandoahRootProcessor rp(heap, heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
 788   ShenandoahTraversalFixRootsTask update_roots_task(&rp);
 789   heap->workers()->run_task(&update_roots_task);
 790 #if defined(COMPILER2) || INCLUDE_JVMCI
 791   DerivedPointerTable::update_pointers();
 792 #endif
 793 }
 794 
 795 void ShenandoahTraversalGC::reset() {
 796   _task_queues->clear();
 797 }
 798 
 799 ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
 800   return _task_queues;
 801 }
 802 
 803 jushort* ShenandoahTraversalGC::get_liveness(uint worker_id) {
 804   return _liveness_local[worker_id];
 805 }
 806 
 807 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 808 private:
 809   ShenandoahHeap* const _heap;
 810 public:
 811   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 812   virtual bool should_return() { return _heap->cancelled_concgc(); }
 813 };
 814 
 815 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 816 public:
 817   void do_void() {
 818     ShenandoahHeap* sh = ShenandoahHeap::heap();
 819     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 820     assert(sh->process_references(), "why else would we be here?");
 821     ParallelTaskTerminator terminator(1, traversal_gc->task_queues());
 822     shenandoah_assert_rp_isalive_installed();
 823     traversal_gc->main_loop((uint) 0, &terminator, false);
 824   }
 825 };
 826 
 827 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 828 private:
 829   ShenandoahObjToScanQueue* _queue;
 830   Thread* _thread;
 831   ShenandoahTraversalGC* _traversal_gc;
 832   template <class T>
 833   inline void do_oop_nv(T* p) {
 834     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, false /* matrix */>(p, _thread, _queue, NULL);
 835   }
 836 
 837 public:
 838   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 839     _queue(q), _thread(Thread::current()),
 840     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 841 
 842   void do_oop(narrowOop* p) { do_oop_nv(p); }
 843   void do_oop(oop* p)       { do_oop_nv(p); }
 844 };
 845 
 846 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 847 private:
 848   ShenandoahObjToScanQueue* _queue;
 849   Thread* _thread;
 850   ShenandoahTraversalGC* _traversal_gc;
 851   template <class T>
 852   inline void do_oop_nv(T* p) {
 853     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* matrix */>(p, _thread, _queue, NULL);
 854   }
 855 
 856 public:
 857   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 858     _queue(q), _thread(Thread::current()),
 859     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 860 
 861   void do_oop(narrowOop* p) { do_oop_nv(p); }
 862   void do_oop(oop* p)       { do_oop_nv(p); }
 863 };
 864 
 865 class ShenandoahTraversalKeepAliveUpdateMatrixClosure : public OopClosure {
 866 private:
 867   ShenandoahObjToScanQueue* _queue;
 868   Thread* _thread;
 869   ShenandoahTraversalGC* _traversal_gc;
 870   template <class T>
 871   inline void do_oop_nv(T* p) {
 872     // TODO: Need to somehow pass base_obj here?
 873     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* matrix */>(p, _thread, _queue, NULL);
 874   }
 875 
 876 public:
 877   ShenandoahTraversalKeepAliveUpdateMatrixClosure(ShenandoahObjToScanQueue* q) :
 878     _queue(q), _thread(Thread::current()),
 879     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 880 
 881   void do_oop(narrowOop* p) { do_oop_nv(p); }
 882   void do_oop(oop* p)       { do_oop_nv(p); }
 883 };
 884 
 885 class ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure : public OopClosure {
 886 private:
 887   ShenandoahObjToScanQueue* _queue;
 888   Thread* _thread;
 889   ShenandoahTraversalGC* _traversal_gc;
 890   template <class T>
 891   inline void do_oop_nv(T* p) {
 892     // TODO: Need to somehow pass base_obj here?
 893     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, true /* matrix */>(p, _thread, _queue, NULL);
 894   }
 895 
 896 public:
 897   ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure(ShenandoahObjToScanQueue* q) :
 898     _queue(q), _thread(Thread::current()),
 899     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 900 
 901   void do_oop(narrowOop* p) { do_oop_nv(p); }
 902   void do_oop(oop* p)       { do_oop_nv(p); }
 903 };
 904 
 905 void ShenandoahTraversalGC::preclean_weak_refs() {
 906   // Pre-cleaning weak references before diving into STW makes sense at the
 907   // end of concurrent mark. This will filter out the references which referents
 908   // are alive. Note that ReferenceProcessor already filters out these on reference
 909   // discovery, and the bulk of work is done here. This phase processes leftovers
 910   // that missed the initial filtering, i.e. when referent was marked alive after
 911   // reference was discovered by RP.
 912 
 913   assert(_heap->process_references(), "sanity");
 914 
 915   ShenandoahHeap* sh = ShenandoahHeap::heap();
 916   ReferenceProcessor* rp = sh->ref_processor();
 917 
 918   // Shortcut if no references were discovered to avoid winding up threads.
 919   if (!rp->has_discovered_references()) {
 920     return;
 921   }
 922 
 923   ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
 924 
 925   shenandoah_assert_rp_isalive_not_installed();
 926   ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());
 927 
 928   // Interrupt on cancelled GC
 929   ShenandoahTraversalCancelledGCYieldClosure yield;
 930 
 931   assert(task_queues()->is_empty(), "Should be empty");
 932   assert(!sh->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");
 933 
 934   ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
 935   ShenandoahForwardedIsAliveClosure is_alive;
 936   if (UseShenandoahMatrix) {
 937     ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(0));
 938     ResourceMark rm;
 939     rp->preclean_discovered_references(&is_alive, &keep_alive,
 940                                        &complete_gc, &yield,
 941                                        NULL);
 942   } else {
 943     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(0));
 944     ResourceMark rm;
 945     rp->preclean_discovered_references(&is_alive, &keep_alive,
 946                                        &complete_gc, &yield,
 947                                        NULL);
 948   }
 949   assert(!sh->cancelled_concgc() || task_queues()->is_empty(), "Should be empty");
 950 }
 951 
 952 // Weak Reference Closures
 953 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 954   uint _worker_id;
 955   ParallelTaskTerminator* _terminator;
 956   bool _reset_terminator;
 957 
 958 public:
 959   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
 960     _worker_id(worker_id),
 961     _terminator(t),
 962     _reset_terminator(reset_terminator) {
 963   }
 964 
 965   void do_void() {
 966     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 967 
 968     ShenandoahHeap* sh = ShenandoahHeap::heap();
 969     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 970     assert(sh->process_references(), "why else would we be here?");
 971     shenandoah_assert_rp_isalive_installed();
 972 
 973     traversal_gc->main_loop(_worker_id, _terminator, false);
 974 
 975     if (_reset_terminator) {
 976       _terminator->reset_for_reuse();
 977     }
 978   }
 979 };
 980 
 981 void ShenandoahTraversalGC::weak_refs_work() {
 982   assert(_heap->process_references(), "sanity");
 983 
 984   ShenandoahHeap* sh = ShenandoahHeap::heap();
 985 
 986   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
 987 
 988   ShenandoahGCPhase phase(phase_root);
 989 
 990   ReferenceProcessor* rp = sh->ref_processor();
 991 
 992   // NOTE: We cannot shortcut on has_discovered_references() here, because
 993   // we will miss marking JNI Weak refs then, see implementation in
 994   // ReferenceProcessor::process_discovered_references.
 995   weak_refs_work_doit();
 996 
 997   rp->verify_no_references_recorded();
 998   assert(!rp->discovery_enabled(), "Post condition");
 999 
1000 }
1001 
1002 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
1003 
1004 private:
1005   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1006   ParallelTaskTerminator* _terminator;
1007 public:
1008 
1009   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1010                              ParallelTaskTerminator* t) :
1011     AbstractGangTask("Process reference objects in parallel"),
1012     _proc_task(proc_task),
1013     _terminator(t) {
1014   }
1015 
1016   void work(uint worker_id) {
1017     ShenandoahEvacOOMScope oom_evac_scope;
1018     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1019     ShenandoahHeap* heap = ShenandoahHeap::heap();
1020     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1021 
1022     ShenandoahForwardedIsAliveClosure is_alive;
1023     if (UseShenandoahMatrix) {
1024       if (!heap->is_degenerated_gc_in_progress()) {
1025         ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1026         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1027       } else {
1028         ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1029         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1030       }
1031     } else {
1032       if (!heap->is_degenerated_gc_in_progress()) {
1033         ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1034         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1035       } else {
1036         ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1037         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1038       }
1039     }
1040   }
1041 };
1042 
1043 class ShenandoahTraversalRefEnqueueTaskProxy : public AbstractGangTask {
1044 
1045 private:
1046   AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;
1047 
1048 public:
1049 
1050   ShenandoahTraversalRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
1051     AbstractGangTask("Enqueue reference objects in parallel"),
1052     _enqueue_task(enqueue_task) {
1053   }
1054 
1055   void work(uint worker_id) {
1056     _enqueue_task.work(worker_id);
1057   }
1058 };
1059 
1060 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1061 
1062 private:
1063   WorkGang* _workers;
1064 
1065 public:
1066 
1067   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) :
1068     _workers(workers) {
1069   }
1070 
1071   // Executes a task using worker threads.
1072   void execute(ProcessTask& task) {
1073     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1074 
1075     // Shortcut execution if task is empty.
1076     // This should be replaced with the generic ReferenceProcessor shortcut,
1077     // see JDK-8181214, JDK-8043575, JDK-6938732.
1078     if (task.is_empty()) {
1079       return;
1080     }
1081 
1082     ShenandoahHeap* heap = ShenandoahHeap::heap();
1083     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1084     uint nworkers = _workers->active_workers();
1085     traversal_gc->task_queues()->reserve(nworkers);
1086     if (UseShenandoahOWST) {
1087       ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1088       ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1089       _workers->run_task(&proc_task_proxy);
1090     } else {
1091       ParallelTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1092       ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1093       _workers->run_task(&proc_task_proxy);
1094     }
1095   }
1096 
1097   void execute(EnqueueTask& task) {
1098     ShenandoahTraversalRefEnqueueTaskProxy enqueue_task_proxy(task);
1099     _workers->run_task(&enqueue_task_proxy);
1100   }
1101 };
1102 
// Does the actual weak-reference processing: runs the ReferenceProcessor's
// process phase (clearing/keeping referents per the softref policy) and then
// the enqueue phase, each under its own timing scope. Note the statement
// order is significant: the is-alive mutator and phase scopes are RAII
// objects whose lifetimes bracket the RP calls.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ReferenceProcessor* rp = sh->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
  ShenandoahPhaseTimings::Phase phase_enqueue = ShenandoahPhaseTimings::weakrefs_enqueue;

  // Install the traversal is-alive closure into RP for the duration of this scope.
  shenandoah_assert_rp_isalive_not_installed();
  ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->soft_ref_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  // Multi-threaded RP work is delegated to the worker gang via this executor.
  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(sh->gc_timer(), rp->num_q());

  {
    ShenandoahGCPhase phase(phase_process);

    // Pick the keep-alive closure matching the current mode (matrix x degen);
    // each branch performs the same processing sequence with that closure,
    // followed by processing of non-Reference weak oops (e.g. JNI weaks).
    ShenandoahForwardedIsAliveClosure is_alive;
    if (UseShenandoahMatrix) {
      if (!_heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      }
    } else {
      if (!_heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      }
    }

    assert(!_heap->cancelled_concgc() || task_queues()->is_empty(), "Should be empty");
  }

  // Skip enqueueing if the cycle was cancelled during processing.
  if (_heap->cancelled_concgc()) return;

  {
    ShenandoahGCPhase phase(phase_enqueue);
    rp->enqueue_discovered_references(&executor, &pt);
    pt.print_enqueue_phase();
  }
}