1 /*
   2  * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/gcTraceTime.inline.hpp"
  27 #include "gc/shared/markBitMap.inline.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shared/taskqueue.inline.hpp"
  32 #include "gc/shared/weakProcessor.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  40 #include "gc/shenandoah/shenandoahHeap.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  44 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  45 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  46 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  47 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  48 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  49 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  50 #include "gc/shenandoah/shenandoahUtils.hpp"
  51 #include "gc/shenandoah/shenandoahVerifier.hpp"
  52 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  53 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  54 
  55 #include "memory/iterator.hpp"
  56 
  57 /**
  58  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  59  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  60  * is incremental-update-based.
  61  *
  62  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  63  * several reasons:
  64  * - We will not reclaim them in this cycle anyway, because they are not in the
  65  *   cset
  66  * - It makes up for the bulk of work during final-pause
  67  * - It also shortens the concurrent cycle because we don't need to
  68  *   pointlessly traverse through newly allocated objects.
  69  * - As a nice side-effect, it solves the I-U termination problem (mutators
  70  *   cannot outrun the GC by allocating like crazy)
  71  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  73  *   achieves the same, but without extra barriers. I think the effect of
  74  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  75  *   particular, we will not see the head of a completely new long linked list
  76  *   in final-pause and end up traversing huge chunks of the heap there.
  77  * - We don't need to see/update the fields of new objects either, because they
  78  *   are either still null, or anything that's been stored into them has been
  79  *   evacuated+enqueued before (and will thus be treated later).
  80  *
  81  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  83  *
  84  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  86  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  87  *   them for cset. This means that we need to protect such regions from
  88  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  89  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  90  *   code.
  91  * - We *need* to traverse through evacuated objects. Those objects are
  92  *   pre-existing, and any references in them point to interesting objects that
  93  *   we need to see. We also want to count them as live, because we just
  94  *   determined that they are alive :-) I achieve this by upping TAMS
  95  *   concurrently for every gclab/gc-shared alloc before publishing the
  96  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  98  */
  99 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
 100 private:
 101   ShenandoahObjToScanQueue* _queue;
 102   ShenandoahTraversalGC* _traversal_gc;
 103   ShenandoahHeap* const _heap;
 104 
 105 public:
 106   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 107     _queue(q),
 108     _heap(ShenandoahHeap::heap())
 109  { }
 110 
 111   void do_buffer(void** buffer, size_t size) {
 112     for (size_t i = 0; i < size; ++i) {
 113       oop* p = (oop*) &buffer[i];
 114       oop obj = RawAccess<>::oop_load(p);
 115       shenandoah_assert_not_forwarded(p, obj);
 116       if (_heap->next_marking_context()->mark(obj)) {
 117         _queue->push(ShenandoahMarkTask(obj));
 118       }
 119     }
 120   }
 121 };
 122 
 123 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 124   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 125 
 126  public:
 127   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 128     _satb_cl(satb_cl) {}
 129 
 130   void do_thread(Thread* thread) {
 131     if (thread->is_Java_thread()) {
 132       JavaThread* jt = (JavaThread*)thread;
 133       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 134     } else if (thread->is_VM_thread()) {
 135       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 136     }
 137   }
 138 };
 139 
 140 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 141 // and remark them later during final-traversal.
 142 class ShenandoahMarkCLDClosure : public CLDClosure {
 143 private:
 144   OopClosure* _cl;
 145 public:
 146   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 147   void do_cld(ClassLoaderData* cld) {
 148     cld->oops_do(_cl, true, true);
 149   }
 150 };
 151 
 152 // Like CLDToOopClosure, but only process modified CLDs
 153 class ShenandoahRemarkCLDClosure : public CLDClosure {
 154 private:
 155   OopClosure* _cl;
 156 public:
 157   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 158   void do_cld(ClassLoaderData* cld) {
 159     if (cld->has_modified_oops()) {
 160       cld->oops_do(_cl, true, true);
 161     }
 162   }
 163 };
 164 
 165 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
 166 private:
 167   ShenandoahRootProcessor* _rp;
 168   ShenandoahHeap* _heap;
 169 public:
 170   ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp) :
 171     AbstractGangTask("Shenandoah Init Traversal Collection"),
 172     _rp(rp),
 173     _heap(ShenandoahHeap::heap()) {}
 174 
 175   void work(uint worker_id) {
 176     ShenandoahWorkerSession worker_session(worker_id);
 177 
 178     ShenandoahEvacOOMScope oom_evac_scope;
 179     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
 180     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 181 
 182     bool process_refs = _heap->process_references();
 183     bool unload_classes = _heap->unload_classes();
 184     ReferenceProcessor* rp = NULL;
 185     if (process_refs) {
 186       rp = _heap->ref_processor();
 187     }
 188 
 189     // Step 1: Process ordinary GC roots.
 190     {
 191       ShenandoahTraversalClosure roots_cl(q, rp);
 192       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
 193       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 194       if (unload_classes) {
 195         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &code_cl, NULL, worker_id);
 196       } else {
 197         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
 198       }
 199     }
 200   }
 201 };
 202 
 203 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 204 private:
 205   ParallelTaskTerminator* _terminator;
 206   ShenandoahHeap* _heap;
 207 public:
 208   ShenandoahConcurrentTraversalCollectionTask(ParallelTaskTerminator* terminator) :
 209     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 210     _terminator(terminator),
 211     _heap(ShenandoahHeap::heap()) {}
 212 
 213   void work(uint worker_id) {
 214     ShenandoahWorkerSession worker_session(worker_id);
 215 
 216     ShenandoahEvacOOMScope oom_evac_scope;
 217     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 218 
 219     // Drain all outstanding work in queues.
 220     traversal_gc->main_loop(worker_id, _terminator);
 221   }
 222 };
 223 
// Worker task for the final-traversal pause: drains leftover SATB buffers,
// re-scans GC roots (using the degenerated, non-evacuating closures if a
// degenerated cycle is in progress), then finishes all queued traversal work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ParallelTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ParallelTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    // With reference processing enabled, weak roots are left to the reference
    // processor; otherwise they are scanned like strong roots.
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process ordinary GC roots.
    // The two arms are identical except for the root closure type: the Degen
    // variant updates references without evacuating.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Weak CLDs only need a re-visit if they were modified during traversal.
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
      }
    } else {
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 2: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator);
    }

  }
};
 297 
 298 void ShenandoahTraversalGC::flush_liveness(uint worker_id) {
 299   jushort* ld = get_liveness(worker_id);
 300   for (uint i = 0; i < _heap->num_regions(); i++) {
 301     ShenandoahHeapRegion* r = _heap->get_region(i);
 302     jushort live = ld[i];
 303     if (live > 0) {
 304       r->increase_live_data_gc_words(live);
 305       ld[i] = 0;
 306     }
 307   }
 308 }
 309 
 310 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 311   _heap(heap),
 312   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 313   _traversal_set(ShenandoahHeapRegionSet()),
 314   _root_regions(ShenandoahHeapRegionSet()),
 315   _root_regions_iterator(&_root_regions),
 316   _matrix(heap->connection_matrix()) {
 317 
 318   uint num_queues = heap->max_workers();
 319   for (uint i = 0; i < num_queues; ++i) {
 320     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 321     task_queue->initialize();
 322     _task_queues->register_queue(i, task_queue);
 323   }
 324 
 325   uint workers = heap->max_workers();
 326   _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
 327   for (uint worker = 0; worker < workers; worker++) {
 328      _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, num_regions, mtGC);
 329   }
 330 
 331 }
 332 
// NOTE(review): the task queues and _liveness_local arrays allocated in the
// constructor are not released here; presumably this object lives for the
// whole VM lifetime — confirm before relying on destruction for cleanup.
ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}
 335 
 336 void ShenandoahTraversalGC::prepare_regions() {
 337   ShenandoahHeap* heap = ShenandoahHeap::heap();
 338   size_t num_regions = heap->num_regions();
 339   ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();
 340   ShenandoahMarkingContext* const ctx = _heap->next_marking_context();
 341   for (size_t i = 0; i < num_regions; i++) {
 342     ShenandoahHeapRegion* region = heap->get_region(i);
 343     if (heap->is_bitmap_slice_committed(region)) {
 344       if (_traversal_set.is_in(i)) {
 345         ctx->set_top_at_mark_start(region->region_number(), region->top());
 346         region->clear_live_data();
 347         assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
 348       } else {
 349         // Everything outside the traversal set is always considered live.
 350         ctx->set_top_at_mark_start(region->region_number(), region->bottom());
 351       }
 352       if (_root_regions.is_in(i)) {
 353         assert(!region->in_collection_set(), "roots must not overlap with cset");
 354         matrix->clear_region_outbound(i);
 355         // Since root region can be allocated at, we should bound the scans
 356         // in it at current top. Otherwise, one thread may evacuate objects
 357         // to that root region, while another would try to scan newly evac'ed
 358         // objects under the race.
 359         region->set_concurrent_iteration_safe_limit(region->top());
 360       }
 361     }
 362   }
 363 }
 364 
 365 void ShenandoahTraversalGC::prepare() {
 366   _heap->collection_set()->clear();
 367   assert(_heap->collection_set()->count() == 0, "collection set not clear");
 368 
 369   _heap->make_parsable(true);
 370 
 371   assert(_heap->next_marking_context()->is_bitmap_clear(), "need clean mark bitmap");
 372 
 373   ShenandoahFreeSet* free_set = _heap->free_set();
 374   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 375 
 376   // Find collection set
 377   _heap->heuristics()->choose_collection_set(collection_set);
 378   prepare_regions();
 379 
 380   // Rebuild free set
 381   free_set->rebuild();
 382 
 383   log_info(gc,ergo)("Got " SIZE_FORMAT " collection set regions and " SIZE_FORMAT " root set regions", collection_set->count(), _root_regions.count());
 384 }
 385 
// Init-traversal pause: verifies the heap (optionally), prepares the
// collection set and region state under the heap lock, enables reference
// discovery, and performs the initial root scan with the worker gang.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  {
    // Choose cset/root-set and set up TAMS; needs the heap lock.
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(false);
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    // Derived pointers must be re-derived after roots may have moved.
    DerivedPointerTable::clear();
#endif

    {
      // Initial root scan, parallelized over the active workers.
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      ShenandoahInitTraversalCollectionTask traversal_task(&rp);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }

  // Root regions will be scanned concurrently; restart the claim iterator.
  _root_regions_iterator.reset(&_root_regions);
}
 437 
// Per-worker traversal driver. Selects a traversal closure statically
// specialized for the current GC mode — matrix updates, degenerated cycle,
// class unloading (metadata), string dedup — so the hot marking loop carries
// no per-object mode checks, then runs main_loop_work() with it.
void ShenandoahTraversalGC::main_loop(uint w, ParallelTaskTerminator* t) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = get_liveness(w);
  Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  if (UseShenandoahMatrix) {
    // Matrix closures additionally record inter-region references.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataMatrixClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMatrixClosure>(&cl, ld, w, t);
        }
      }
    } else {
      // Degenerated cycle: Degen closures update references without evacuating.
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenMatrixClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenMatrixClosure>(&cl, ld, w, t);
        }
      }
    }
  } else {
    // Same dispatch without connection-matrix maintenance.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t);
        }
      }
    } else {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t);
        }
      }
    }
  }
  // Publish per-region live counters accumulated during this loop.
  flush_liveness(w);

}
 529 
// Core traversal loop for one worker:
//  1. Drain any pre-existing queues (claimed one at a time).
//  2. Scan claimed root regions (marked objects only, bounded at safe limit).
//  3. Steady-state loop: drain SATB buffers, arraycopy tasks, own queue, and
//     steal from other queues, until the terminator agrees everyone is done.
// Cancellation is checked between phases; on cancel, workers rendezvous via
// offer_termination with the cancelled-terminator and bail out.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator* terminator) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrentMark();

  // Number of tasks to process between cancellation checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield()) {
      // Leave the OOM scope before blocking in termination, so that stuck
      // evacuating threads are not blocked on us.
      ShenandoahCancelledTerminatorTerminator tt;
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator)) return;

  // Step 2: Process all root regions.
  // TODO: Interleave this in the normal mark loop below.
  ShenandoahHeapRegion* r = _root_regions_iterator.claim_next();
  while (r != NULL) {
    _heap->marked_object_oop_safe_iterate(r, cl);
    if (ShenandoahPacing) {
      _heap->pacer()->report_partial(r->get_live_data_words());
    }
    if (check_and_handle_cancelled_gc(terminator)) return;
    r = _root_regions_iterator.claim_next();
  }

  if (check_and_handle_cancelled_gc(terminator)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  // Seed for the pseudo-random steal-victim selection.
  int seed = 17;

  while (true) {
    if (check_and_handle_cancelled_gc(terminator)) return;

    // Pick up oops recorded by mutators' write barriers since the last pass.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    if (_arraycopy_task_queue.length() > 0) {
      process_arraycopy_task(cl);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task) ||
          queues->steal(worker_id, &seed, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0 &&
        _arraycopy_task_queue.length() == 0) {
      // No more work, try to terminate
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      if (terminator->offer_termination()) return;
    }
  }
}
 620 
 621 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator) {
 622   if (_heap->cancelled_gc()) {
 623     ShenandoahCancelledTerminatorTerminator tt;
 624     ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 625     while (! terminator->offer_termination(&tt));
 626     return true;
 627   }
 628   return false;
 629 }
 630 
 631 void ShenandoahTraversalGC::concurrent_traversal_collection() {
 632   ClassLoaderDataGraph::clear_claimed_marks();
 633 
 634   ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
 635   if (!_heap->cancelled_gc()) {
 636     uint nworkers = _heap->workers()->active_workers();
 637     task_queues()->reserve(nworkers);
 638     ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);
 639 
 640     if (UseShenandoahOWST) {
 641       ShenandoahTaskTerminator terminator(nworkers, task_queues());
 642       ShenandoahConcurrentTraversalCollectionTask task(&terminator);
 643       _heap->workers()->run_task(&task);
 644     } else {
 645       ParallelTaskTerminator terminator(nworkers, task_queues());
 646       ShenandoahConcurrentTraversalCollectionTask task(&terminator);
 647       _heap->workers()->run_task(&task);
 648     }
 649   }
 650 
 651   if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
 652     ShenandoahEvacOOMScope oom_evac_scope;
 653     preclean_weak_refs();
 654   }
 655 }
 656 
// Final-traversal pause: finishes all traversal work, processes references,
// unloads classes, then trashes the collection set and fully-dead regions
// and rebuilds the free set.
void ShenandoahTraversalGC::final_traversal_collection() {

  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    // Terminator flavor depends on UseShenandoahOWST; same task either way.
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
      _heap->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
      _heap->workers()->run_task(&task);
    }
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc() && _heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(false);
    // Class unloading may have nulled/moved roots; fix them up.
    fixup_roots();
  }

  if (!_heap->cancelled_gc()) {
    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      assert(_arraycopy_task_queue.length() == 0, "arraycopy tasks must be done");

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->next_marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // TAMS still at top means nothing was allocated in this region during
        // the cycle (see the is_alloc_region discussion in the file header);
        // such regions are safe to trash if they have no live data.
        bool not_allocated = ctx->top_at_mark_start(r->region_number()) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + BrooksPointer::word_size();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash();
          // Trash all continuation regions of the same humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }
  }
}
 753 
 754 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 755 private:
 756 
 757   template <class T>
 758   inline void do_oop_work(T* p) {
 759     T o = RawAccess<>::oop_load(p);
 760     if (!CompressedOops::is_null(o)) {
 761       oop obj = CompressedOops::decode_not_null(o);
 762       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 763       if (!oopDesc::unsafe_equals(obj, forw)) {
 764         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 765       }
 766     }
 767   }
 768 public:
 769   inline void do_oop(oop* p) { do_oop_work(p); }
 770   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 771 };
 772 
 773 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 774   ShenandoahRootProcessor* _rp;
 775 public:
 776 
 777   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
 778     AbstractGangTask("Shenandoah traversal fix roots"),
 779     _rp(rp)
 780   {
 781     // Nothing else to do.
 782   }
 783 
 784   void work(uint worker_id) {
 785     ShenandoahTraversalFixRootsClosure cl;
 786     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 787     CLDToOopClosure cldCl(&cl);
 788     _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
 789   }
 790 };
 791 
 792 void ShenandoahTraversalGC::fixup_roots() {
 793 #if defined(COMPILER2) || INCLUDE_JVMCI
 794   DerivedPointerTable::clear();
 795 #endif
 796   ShenandoahHeap* heap = ShenandoahHeap::heap();
 797   ShenandoahRootProcessor rp(heap, heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
 798   ShenandoahTraversalFixRootsTask update_roots_task(&rp);
 799   heap->workers()->run_task(&update_roots_task);
 800 #if defined(COMPILER2) || INCLUDE_JVMCI
 801   DerivedPointerTable::update_pointers();
 802 #endif
 803 }
 804 
// Resets traversal GC state: drops any leftover entries in the worker task
// queues and the pending arraycopy task queue.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
  _arraycopy_task_queue.clear();
}
 809 
// Returns the set of object-to-scan queues used by traversal GC workers.
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 813 
// Returns the per-worker local liveness accumulation buffer for worker_id.
// NOTE(review): no bounds check here — assumes worker_id is within the range
// _liveness_local was sized for; confirm against its allocation site.
jushort* ShenandoahTraversalGC::get_liveness(uint worker_id) {
  return _liveness_local[worker_id];
}
 817 
 818 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 819 private:
 820   ShenandoahHeap* const _heap;
 821 public:
 822   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 823   virtual bool should_return() { return _heap->cancelled_gc(); }
 824 };
 825 
 826 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 827 public:
 828   void do_void() {
 829     ShenandoahHeap* sh = ShenandoahHeap::heap();
 830     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 831     assert(sh->process_references(), "why else would we be here?");
 832     ParallelTaskTerminator terminator(1, traversal_gc->task_queues());
 833     shenandoah_assert_rp_isalive_installed();
 834     traversal_gc->main_loop((uint) 0, &terminator);
 835   }
 836 };
 837 
 838 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 839 private:
 840   ShenandoahObjToScanQueue* _queue;
 841   Thread* _thread;
 842   ShenandoahTraversalGC* _traversal_gc;
 843   template <class T>
 844   inline void do_oop_work(T* p) {
 845     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, false /* matrix */>(p, _thread, _queue, NULL);
 846   }
 847 
 848 public:
 849   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 850     _queue(q), _thread(Thread::current()),
 851     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 852 
 853   void do_oop(narrowOop* p) { do_oop_work(p); }
 854   void do_oop(oop* p)       { do_oop_work(p); }
 855 };
 856 
 857 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 858 private:
 859   ShenandoahObjToScanQueue* _queue;
 860   Thread* _thread;
 861   ShenandoahTraversalGC* _traversal_gc;
 862   template <class T>
 863   inline void do_oop_work(T* p) {
 864     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* matrix */>(p, _thread, _queue, NULL);
 865   }
 866 
 867 public:
 868   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 869     _queue(q), _thread(Thread::current()),
 870     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 871 
 872   void do_oop(narrowOop* p) { do_oop_work(p); }
 873   void do_oop(oop* p)       { do_oop_work(p); }
 874 };
 875 
 876 class ShenandoahTraversalKeepAliveUpdateMatrixClosure : public OopClosure {
 877 private:
 878   ShenandoahObjToScanQueue* _queue;
 879   Thread* _thread;
 880   ShenandoahTraversalGC* _traversal_gc;
 881   template <class T>
 882   inline void do_oop_work(T* p) {
 883     // TODO: Need to somehow pass base_obj here?
 884     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* matrix */>(p, _thread, _queue, NULL);
 885   }
 886 
 887 public:
 888   ShenandoahTraversalKeepAliveUpdateMatrixClosure(ShenandoahObjToScanQueue* q) :
 889     _queue(q), _thread(Thread::current()),
 890     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 891 
 892   void do_oop(narrowOop* p) { do_oop_work(p); }
 893   void do_oop(oop* p)       { do_oop_work(p); }
 894 };
 895 
 896 class ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure : public OopClosure {
 897 private:
 898   ShenandoahObjToScanQueue* _queue;
 899   Thread* _thread;
 900   ShenandoahTraversalGC* _traversal_gc;
 901   template <class T>
 902   inline void do_oop_work(T* p) {
 903     // TODO: Need to somehow pass base_obj here?
 904     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, true /* matrix */>(p, _thread, _queue, NULL);
 905   }
 906 
 907 public:
 908   ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure(ShenandoahObjToScanQueue* q) :
 909     _queue(q), _thread(Thread::current()),
 910     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 911 
 912   void do_oop(narrowOop* p) { do_oop_work(p); }
 913   void do_oop(oop* p)       { do_oop_work(p); }
 914 };
 915 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Shortcut if no references were discovered to avoid winding up threads.
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning works on a single queue (queue 0, see below), so disable MT
  // discovery for the duration of this scope.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Install the traversal is-alive closure into RP for the scope of this method.
  shenandoah_assert_rp_isalive_not_installed();
  ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

  // Interrupt on cancelled GC
  ShenandoahTraversalCancelledGCYieldClosure yield;

  assert(task_queues()->is_empty(), "Should be empty");
  assert(!sh->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // complete_gc drains queue 0 single-threaded; keep_alive pushes kept-alive
  // referents back onto queue 0. The matrix variant additionally runs
  // process_oop with the matrix template flag set.
  ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
  ShenandoahForwardedIsAliveClosure is_alive;
  if (UseShenandoahMatrix) {
    ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  } else {
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  }
  // Queues must be drained, unless precleaning bailed out on cancellation.
  assert(sh->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 962 
 963 // Weak Reference Closures
 964 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 965   uint _worker_id;
 966   ParallelTaskTerminator* _terminator;
 967   bool _reset_terminator;
 968 
 969 public:
 970   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
 971     _worker_id(worker_id),
 972     _terminator(t),
 973     _reset_terminator(reset_terminator) {
 974   }
 975 
 976   void do_void() {
 977     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 978 
 979     ShenandoahHeap* sh = ShenandoahHeap::heap();
 980     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 981     assert(sh->process_references(), "why else would we be here?");
 982     shenandoah_assert_rp_isalive_installed();
 983 
 984     traversal_gc->main_loop(_worker_id, _terminator);
 985 
 986     if (_reset_terminator) {
 987       _terminator->reset_for_reuse();
 988     }
 989   }
 990 };
 991 
 992 void ShenandoahTraversalGC::weak_refs_work() {
 993   assert(_heap->process_references(), "sanity");
 994 
 995   ShenandoahHeap* sh = ShenandoahHeap::heap();
 996 
 997   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
 998 
 999   ShenandoahGCPhase phase(phase_root);
1000 
1001   ReferenceProcessor* rp = sh->ref_processor();
1002 
1003   // NOTE: We cannot shortcut on has_discovered_references() here, because
1004   // we will miss marking JNI Weak refs then, see implementation in
1005   // ReferenceProcessor::process_discovered_references.
1006   weak_refs_work_doit();
1007 
1008   rp->verify_no_references_recorded();
1009   assert(!rp->discovery_enabled(), "Post condition");
1010 
1011 }
1012 
// Gang task adapter that runs a ReferenceProcessor ProcessTask on traversal
// worker threads, pairing it with the matching keep-alive and queue-draining
// closures for the current GC mode.
class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {

private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;
public:

  ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    // Enter the evacuation OOM protocol scope while referents are kept alive.
    ShenandoahEvacOOMScope oom_evac_scope;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    // Drains this worker's share of the traversal queues after RP work.
    ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);

    // Pick the keep-alive closure variant matching the current mode: matrix
    // vs. plain, and concurrent vs. degenerated. The variants only differ in
    // the template flags they pass to process_oop.
    ShenandoahForwardedIsAliveClosure is_alive;
    if (UseShenandoahMatrix) {
      if (!heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      }
    } else {
      if (!heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      }
    }
  }
};
1053 
1054 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1055 
1056 private:
1057   WorkGang* _workers;
1058 
1059 public:
1060 
1061   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) :
1062     _workers(workers) {
1063   }
1064 
1065   // Executes a task using worker threads.
1066   void execute(ProcessTask& task, uint ergo_workers) {
1067     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1068 
1069     ShenandoahHeap* heap = ShenandoahHeap::heap();
1070     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1071     ShenandoahPushWorkerScope scope(_workers,
1072                                     ergo_workers,
1073                                     /* do_check = */ false);
1074     uint nworkers = _workers->active_workers();
1075     traversal_gc->task_queues()->reserve(nworkers);
1076     if (UseShenandoahOWST) {
1077       ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1078       ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1079       _workers->run_task(&proc_task_proxy);
1080     } else {
1081       ParallelTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1082       ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1083       _workers->run_task(&proc_task_proxy);
1084     }
1085   }
1086 };
1087 
// Performs the actual weak reference processing: sets up soft-ref policy,
// runs RP over all discovered references with the mode-appropriate keep-alive
// closure, then processes the remaining weak oop storages.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ReferenceProcessor* rp = sh->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  // Install the traversal is-alive closure into RP for the scope of this method.
  shenandoah_assert_rp_isalive_not_installed();
  ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->soft_ref_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  // MT path goes through the executor, which spins up the worker gang.
  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(sh->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination);

    // Four-way branch selects the keep-alive closure variant for the current
    // mode (matrix vs. plain, concurrent vs. degenerated); the RP invocation
    // and subsequent weak-oops pass are otherwise identical in each branch.
    ShenandoahForwardedIsAliveClosure is_alive;
    if (UseShenandoahMatrix) {
      if (!_heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      }
    } else {
      if (!_heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      }
    }

    // Queues must be drained, unless GC was cancelled mid-processing.
    assert(!_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
  }
}
1163 
// Enqueues an arraycopy/clone region starting at 'start' for later processing
// by process_arraycopy_task(). A count of 0 denotes a clone of a whole object;
// a non-zero count covers that many element slots (see process_arraycopy_task).
void ShenandoahTraversalGC::push_arraycopy(HeapWord* start, size_t count) {
  _arraycopy_task_queue.push(start, count);
}
1167 
1168 template <class T>
1169 bool ShenandoahTraversalGC::process_arraycopy_task(T* cl) {
1170   ShenandoahArrayCopyTask task = _arraycopy_task_queue.pop();
1171   if (task.start() == NULL) {
1172     return false;
1173   }
1174   if (task.count() == 0) {
1175     // Handle clone.
1176     oop obj = oop(task.start());
1177     obj->oop_iterate(cl);
1178   } else {
1179     HeapWord* array = task.start();
1180     size_t count = task.count();
1181     if (UseCompressedOops) {
1182       narrowOop* p = reinterpret_cast<narrowOop*>(array);
1183       for (size_t i = 0; i < count; i++) {
1184         cl->do_oop(p++);
1185       }
1186     } else {
1187       oop* p = reinterpret_cast<oop*>(array);
1188       for (size_t i = 0; i < count; i++) {
1189         cl->do_oop(p++);
1190       }
1191     }
1192   }
1193   return true;
1194 }