1 /*
   2  * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/gcTraceTime.inline.hpp"
  27 #include "gc/shared/markBitMap.inline.hpp"
  28 #include "gc/shared/workgroup.hpp"
  29 #include "gc/shared/taskqueue.inline.hpp"
  30 #include "gc/shared/weakProcessor.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  32 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
  33 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  35 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  36 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  37 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.hpp"
  39 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  41 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  43 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  44 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  45 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  46 #include "gc/shenandoah/shenandoahStrDedupQueue.inline.hpp"
  47 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  48 #include "gc/shenandoah/shenandoahUtils.hpp"
  49 #include "gc/shenandoah/shenandoahVerifier.hpp"
  50 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  51 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  52 
  53 #include "memory/iterator.hpp"
  54 
  55 /**
  56  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  57  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  58  * is incremental-update-based.
  59  *
  60  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  61  * several reasons:
  62  * - We will not reclaim them in this cycle anyway, because they are not in the
  63  *   cset
  64  * - It makes up for the bulk of work during final-pause
  65  * - It also shortens the concurrent cycle because we don't need to
  66  *   pointlessly traverse through newly allocated objects.
  67  * - As a nice side-effect, it solves the I-U termination problem (mutators
  68  *   cannot outrun the GC by allocating like crazy)
  69  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  71  *   achieves the same, but without extra barriers. I think the effect of
  72  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  73  *   particular, we will not see the head of a completely new long linked list
  74  *   in final-pause and end up traversing huge chunks of the heap there.
  75  * - We don't need to see/update the fields of new objects either, because they
  76  *   are either still null, or anything that's been stored into them has been
  77  *   evacuated+enqueued before (and will thus be treated later).
  78  *
  79  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  81  *
  82  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  84  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  85  *   them for cset. This means that we need to protect such regions from
  86  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  87  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  88  *   code.
  89  * - We *need* to traverse through evacuated objects. Those objects are
  90  *   pre-existing, and any references in them point to interesting objects that
  91  *   we need to see. We also want to count them as live, because we just
  92  *   determined that they are alive :-) I achieve this by upping TAMS
  93  *   concurrently for every gclab/gc-shared alloc before publishing the
  94  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  96  */
  97 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  98 private:
  99   ShenandoahObjToScanQueue* _queue;
 100   ShenandoahTraversalGC* _traversal_gc;
 101   ShenandoahHeap* _heap;
 102   ShenandoahHeapRegionSet* _traversal_set;
 103 
 104 public:
 105   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 106     _queue(q), _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 107     _heap(ShenandoahHeap::heap()),
 108     _traversal_set(ShenandoahHeap::heap()->traversal_gc()->traversal_set())
 109  { }
 110 
 111   void do_buffer(void** buffer, size_t size) {
 112     for (size_t i = 0; i < size; ++i) {
 113       oop* p = (oop*) &buffer[i];
 114       oop obj = RawAccess<>::oop_load(p);
 115       shenandoah_assert_not_forwarded(p, obj);
 116       if (_traversal_set->is_in((HeapWord*) obj) && !_heap->is_marked_next(obj) && _heap->mark_next(obj)) {
 117         _queue->push(ShenandoahMarkTask(obj));
 118       }
 119     }
 120   }
 121 };
 122 
 123 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 124   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 125 
 126  public:
 127   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 128     _satb_cl(satb_cl) {}
 129 
 130   void do_thread(Thread* thread) {
 131     if (thread->is_Java_thread()) {
 132       JavaThread* jt = (JavaThread*)thread;
 133       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 134     } else if (thread->is_VM_thread()) {
 135       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 136     }
 137   }
 138 };
 139 
 140 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 141 // and remark them later during final-traversal.
 142 class ShenandoahMarkCLDClosure : public CLDClosure {
 143 private:
 144   OopClosure* _cl;
 145 public:
 146   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 147   void do_cld(ClassLoaderData* cld) {
 148     cld->oops_do(_cl, true, true);
 149   }
 150 };
 151 
 152 // Like CLDToOopClosure, but only process modified CLDs
 153 class ShenandoahRemarkCLDClosure : public CLDClosure {
 154 private:
 155   OopClosure* _cl;
 156 public:
 157   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 158   void do_cld(ClassLoaderData* cld) {
 159     if (cld->has_modified_oops()) {
 160       cld->oops_do(_cl, true, true);
 161     }
 162   }
 163 };
 164 
 165 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
 166 private:
 167   ShenandoahRootProcessor* _rp;
 168   ShenandoahHeap* _heap;
 169 public:
 170   ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp) :
 171     AbstractGangTask("Shenandoah Init Traversal Collection"),
 172     _rp(rp),
 173     _heap(ShenandoahHeap::heap()) {}
 174 
 175   void work(uint worker_id) {
 176     ShenandoahEvacOOMScope oom_evac_scope;
 177     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
 178     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 179 
 180     bool process_refs = _heap->process_references();
 181     bool unload_classes = _heap->unload_classes();
 182     ReferenceProcessor* rp = NULL;
 183     if (process_refs) {
 184       rp = _heap->ref_processor();
 185     }
 186 
 187     // Step 1: Process ordinary GC roots.
 188     {
 189       ShenandoahTraversalClosure roots_cl(q, rp);
 190       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
 191       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 192       if (unload_classes) {
 193         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &code_cl, NULL, worker_id);
 194       } else {
 195         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
 196       }
 197     }
 198   }
 199 };
 200 
 201 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 202 private:
 203   ParallelTaskTerminator* _terminator;
 204   ShenandoahHeap* _heap;
 205 public:
 206   ShenandoahConcurrentTraversalCollectionTask(ParallelTaskTerminator* terminator) :
 207     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 208     _terminator(terminator),
 209     _heap(ShenandoahHeap::heap()) {}
 210 
 211   void work(uint worker_id) {
 212     ShenandoahEvacOOMScope oom_evac_scope;
 213     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 214 
 215     // Drain all outstanding work in queues.
 216     traversal_gc->main_loop(worker_id, _terminator, true);
 217   }
 218 };
 219 
 220 class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
 221 private:
 222   ShenandoahRootProcessor* _rp;
 223   ParallelTaskTerminator* _terminator;
 224   ShenandoahHeap* _heap;
 225 public:
 226   ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ParallelTaskTerminator* terminator) :
 227     AbstractGangTask("Shenandoah Final Traversal Collection"),
 228     _rp(rp),
 229     _terminator(terminator),
 230     _heap(ShenandoahHeap::heap()) {}
 231 
 232   void work(uint worker_id) {
 233     ShenandoahEvacOOMScope oom_evac_scope;
 234     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 235 
 236     ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
 237     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 238 
 239     bool process_refs = _heap->process_references();
 240     bool unload_classes = _heap->unload_classes();
 241     ReferenceProcessor* rp = NULL;
 242     if (process_refs) {
 243       rp = _heap->ref_processor();
 244     }
 245 
 246     // Step 1: Drain outstanding SATB queues.
 247     // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
 248     ShenandoahTraversalSATBBufferClosure satb_cl(q);
 249     {
 250       // Process remaining finished SATB buffers.
 251       SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
 252       while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
 253       // Process remaining threads SATB buffers below.
 254     }
 255 
 256     // Step 1: Process ordinary GC roots.
 257     if (!_heap->is_degenerated_gc_in_progress()) {
 258       ShenandoahTraversalClosure roots_cl(q, rp);
 259       CLDToOopClosure cld_cl(&roots_cl);
 260       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 261       ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
 262       if (unload_classes) {
 263         ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
 264         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
 265       } else {
 266         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
 267       }
 268     } else {
 269       ShenandoahTraversalDegenClosure roots_cl(q, rp);
 270       CLDToOopClosure cld_cl(&roots_cl);
 271       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 272       ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
 273       if (unload_classes) {
 274         ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
 275         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
 276       } else {
 277         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
 278       }
 279     }
 280 
 281     {
 282       ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
 283       ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);
 284 
 285       // Step 3: Finally drain all outstanding work in queues.
 286       traversal_gc->main_loop(worker_id, _terminator, false);
 287     }
 288 
 289   }
 290 };
 291 
 292 void ShenandoahTraversalGC::flush_liveness(uint worker_id) {
 293   jushort* ld = get_liveness(worker_id);
 294   for (uint i = 0; i < _heap->num_regions(); i++) {
 295     ShenandoahHeapRegion* r = _heap->get_region(i);
 296     jushort live = ld[i];
 297     if (live > 0) {
 298       r->increase_live_data_gc_words(live);
 299       ld[i] = 0;
 300     }
 301   }
 302 }
 303 
 304 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 305   _heap(heap),
 306   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 307   _traversal_set(new ShenandoahHeapRegionSet()),
 308   _root_regions(new ShenandoahHeapRegionSet()),
 309   _root_regions_iterator(_root_regions),
 310   _matrix(heap->connection_matrix()) {
 311 
 312   uint num_queues = heap->max_workers();
 313   for (uint i = 0; i < num_queues; ++i) {
 314     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 315     task_queue->initialize();
 316     _task_queues->register_queue(i, task_queue);
 317   }
 318 
 319   uint workers = heap->max_workers();
 320   _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
 321   for (uint worker = 0; worker < workers; worker++) {
 322      _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, num_regions, mtGC);
 323   }
 324 
 325 }
 326 
 327 ShenandoahTraversalGC::~ShenandoahTraversalGC() {
 328 }
 329 
 330 void ShenandoahTraversalGC::prepare_regions() {
 331   ShenandoahHeap* heap = ShenandoahHeap::heap();
 332   size_t num_regions = heap->num_regions();
 333   ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();
 334 
 335   for (size_t i = 0; i < num_regions; i++) {
 336     ShenandoahHeapRegion* region = heap->get_region(i);
 337     if (heap->is_bitmap_slice_committed(region)) {
 338       if (_traversal_set->is_in(i)) {
 339         heap->set_next_top_at_mark_start(region->bottom(), region->top());
 340         region->clear_live_data();
 341         assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
 342       } else {
 343         // Everything outside the traversal set is always considered live.
 344         heap->set_next_top_at_mark_start(region->bottom(), region->bottom());
 345       }
 346       if (_root_regions->is_in(i)) {
 347         assert(!region->in_collection_set(), "roots must not overlap with cset");
 348         matrix->clear_region_outbound(i);
 349         // Since root region can be allocated at, we should bound the scans
 350         // in it at current top. Otherwise, one thread may evacuate objects
 351         // to that root region, while another would try to scan newly evac'ed
 352         // objects under the race.
 353         region->set_concurrent_iteration_safe_limit(region->top());
 354       }
 355     }
 356   }
 357 }
 358 
 359 void ShenandoahTraversalGC::prepare() {
 360   _heap->collection_set()->clear();
 361   assert(_heap->collection_set()->count() == 0, "collection set not clear");
 362 
 363   _heap->make_parsable(true);
 364 
 365   assert(_heap->is_next_bitmap_clear(), "need clean mark bitmap");
 366 
 367   ShenandoahFreeSet* free_set = _heap->free_set();
 368   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 369 
 370   // Find collection set
 371   _heap->heuristics()->choose_collection_set(collection_set);
 372   prepare_regions();
 373 
 374   // Rebuild free set
 375   free_set->rebuild();
 376 
 377   log_info(gc,ergo)("Got "SIZE_FORMAT" collection set regions and "SIZE_FORMAT" root set regions", collection_set->count(), _root_regions->count());
 378 }
 379 
 380 void ShenandoahTraversalGC::init_traversal_collection() {
 381   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");
 382 
 383   if (ShenandoahVerify) {
 384     _heap->verifier()->verify_before_traversal();
 385   }
 386 
 387   {
 388     ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
 389     ShenandoahHeapLocker lock(_heap->lock());
 390     prepare();
 391   }
 392 
 393   _heap->set_concurrent_traversal_in_progress(true);
 394 
 395   bool process_refs = _heap->process_references();
 396   if (process_refs) {
 397     ReferenceProcessor* rp = _heap->ref_processor();
 398     rp->enable_discovery(true /*verify_no_refs*/);
 399     rp->setup_policy(false);
 400   }
 401 
 402   {
 403     ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
 404     assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
 405 
 406 #if defined(COMPILER2) || INCLUDE_JVMCI
 407     DerivedPointerTable::clear();
 408 #endif
 409 
 410     {
 411       uint nworkers = _heap->workers()->active_workers();
 412       task_queues()->reserve(nworkers);
 413       ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);
 414 
 415       if (UseShenandoahOWST) {
 416         ShenandoahTaskTerminator terminator(nworkers, task_queues());
 417         ShenandoahInitTraversalCollectionTask traversal_task(&rp);
 418         _heap->workers()->run_task(&traversal_task);
 419       } else {
 420         ParallelTaskTerminator terminator(nworkers, task_queues());
 421         ShenandoahInitTraversalCollectionTask traversal_task(&rp);
 422         _heap->workers()->run_task(&traversal_task);
 423       }
 424     }
 425 
 426 #if defined(COMPILER2) || INCLUDE_JVMCI
 427     DerivedPointerTable::update_pointers();
 428 #endif
 429   }
 430 
 431   if (ShenandoahPacing) {
 432     _heap->pacer()->setup_for_traversal();
 433   }
 434 
 435   _root_regions_iterator.reset(_root_regions);
 436 }
 437 
 438 void ShenandoahTraversalGC::main_loop(uint worker_id, ParallelTaskTerminator* terminator, bool do_satb) {
 439   if (do_satb) {
 440     main_loop_prework<true>(worker_id, terminator);
 441   } else {
 442     main_loop_prework<false>(worker_id, terminator);
 443   }
 444 }
 445 
 446 template <bool DO_SATB>
 447 void ShenandoahTraversalGC::main_loop_prework(uint w, ParallelTaskTerminator* t) {
 448   ShenandoahObjToScanQueue* q = task_queues()->queue(w);
 449 
 450   // Initialize live data.
 451   jushort* ld = get_liveness(w);
 452   Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));
 453 
 454   ReferenceProcessor* rp = NULL;
 455   if (_heap->process_references()) {
 456     rp = _heap->ref_processor();
 457   }
 458   if (UseShenandoahMatrix) {
 459     if (!_heap->is_degenerated_gc_in_progress()) {
 460       if (_heap->unload_classes()) {
 461         if (ShenandoahStringDedup::is_enabled()) {
 462           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 463           ShenandoahTraversalMetadataDedupMatrixClosure cl(q, rp, dq);
 464           main_loop_work<ShenandoahTraversalMetadataDedupMatrixClosure, DO_SATB>(&cl, ld, w, t);
 465         } else {
 466           ShenandoahTraversalMetadataMatrixClosure cl(q, rp);
 467           main_loop_work<ShenandoahTraversalMetadataMatrixClosure, DO_SATB>(&cl, ld, w, t);
 468         }
 469       } else {
 470         if (ShenandoahStringDedup::is_enabled()) {
 471           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 472           ShenandoahTraversalDedupMatrixClosure cl(q, rp, dq);
 473           main_loop_work<ShenandoahTraversalDedupMatrixClosure, DO_SATB>(&cl, ld, w, t);
 474         } else {
 475           ShenandoahTraversalMatrixClosure cl(q, rp);
 476           main_loop_work<ShenandoahTraversalMatrixClosure, DO_SATB>(&cl, ld, w, t);
 477         }
 478       }
 479     } else {
 480       if (_heap->unload_classes()) {
 481         if (ShenandoahStringDedup::is_enabled()) {
 482           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 483           ShenandoahTraversalMetadataDedupDegenMatrixClosure cl(q, rp, dq);
 484           main_loop_work<ShenandoahTraversalMetadataDedupDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
 485         } else {
 486           ShenandoahTraversalMetadataDegenMatrixClosure cl(q, rp);
 487           main_loop_work<ShenandoahTraversalMetadataDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
 488         }
 489       } else {
 490         if (ShenandoahStringDedup::is_enabled()) {
 491           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 492           ShenandoahTraversalDedupDegenMatrixClosure cl(q, rp, dq);
 493           main_loop_work<ShenandoahTraversalDedupDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
 494         } else {
 495           ShenandoahTraversalDegenMatrixClosure cl(q, rp);
 496           main_loop_work<ShenandoahTraversalDegenMatrixClosure, DO_SATB>(&cl, ld, w, t);
 497         }
 498       }
 499     }
 500   } else {
 501     if (!_heap->is_degenerated_gc_in_progress()) {
 502       if (_heap->unload_classes()) {
 503         if (ShenandoahStringDedup::is_enabled()) {
 504           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 505           ShenandoahTraversalMetadataDedupClosure cl(q, rp, dq);
 506           main_loop_work<ShenandoahTraversalMetadataDedupClosure, DO_SATB>(&cl, ld, w, t);
 507         } else {
 508           ShenandoahTraversalMetadataClosure cl(q, rp);
 509           main_loop_work<ShenandoahTraversalMetadataClosure, DO_SATB>(&cl, ld, w, t);
 510         }
 511       } else {
 512         if (ShenandoahStringDedup::is_enabled()) {
 513           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 514           ShenandoahTraversalDedupClosure cl(q, rp, dq);
 515           main_loop_work<ShenandoahTraversalDedupClosure, DO_SATB>(&cl, ld, w, t);
 516         } else {
 517           ShenandoahTraversalClosure cl(q, rp);
 518           main_loop_work<ShenandoahTraversalClosure, DO_SATB>(&cl, ld, w, t);
 519         }
 520       }
 521     } else {
 522       if (_heap->unload_classes()) {
 523         if (ShenandoahStringDedup::is_enabled()) {
 524           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 525           ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp, dq);
 526           main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure, DO_SATB>(&cl, ld, w, t);
 527         } else {
 528           ShenandoahTraversalMetadataDegenClosure cl(q, rp);
 529           main_loop_work<ShenandoahTraversalMetadataDegenClosure, DO_SATB>(&cl, ld, w, t);
 530         }
 531       } else {
 532         if (ShenandoahStringDedup::is_enabled()) {
 533           ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 534           ShenandoahTraversalDedupDegenClosure cl(q, rp, dq);
 535           main_loop_work<ShenandoahTraversalDedupDegenClosure, DO_SATB>(&cl, ld, w, t);
 536         } else {
 537           ShenandoahTraversalDegenClosure cl(q, rp);
 538           main_loop_work<ShenandoahTraversalDegenClosure, DO_SATB>(&cl, ld, w, t);
 539         }
 540       }
 541     }
 542   }
 543   flush_liveness(w);
 544 
 545 }
 546 
 547 template <class T, bool DO_SATB>
 548 void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator* terminator) {
 549   ShenandoahObjToScanQueueSet* queues = task_queues();
 550   ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 551   ShenandoahConcurrentMark* conc_mark = _heap->concurrentMark();
 552 
 553   uintx stride = ShenandoahMarkLoopStride;
 554 
 555   ShenandoahMarkTask task;
 556 
 557   // Process outstanding queues, if any.
 558   q = queues->claim_next();
 559   while (q != NULL) {
 560     if (_heap->check_cancelled_gc_and_yield()) {
 561       ShenandoahCancelledTerminatorTerminator tt;
 562       ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 563       while (!terminator->offer_termination(&tt));
 564       return;
 565     }
 566 
 567     for (uint i = 0; i < stride; i++) {
 568       if (q->pop_buffer(task) ||
 569           q->pop_local(task) ||
 570           q->pop_overflow(task)) {
 571         conc_mark->do_task<T>(q, cl, live_data, &task);
 572       } else {
 573         assert(q->is_empty(), "Must be empty");
 574         q = queues->claim_next();
 575         break;
 576       }
 577     }
 578   }
 579 
 580   if (check_and_handle_cancelled_gc(terminator)) return;
 581 
 582   // Step 2: Process all root regions.
 583   // TODO: Interleave this in the normal mark loop below.
 584   ShenandoahHeapRegion* r = _root_regions_iterator.claim_next();
 585   while (r != NULL) {
 586     _heap->marked_object_oop_safe_iterate(r, cl);
 587     if (ShenandoahPacing) {
 588       _heap->pacer()->report_partial(r->get_live_data_words());
 589     }
 590     if (check_and_handle_cancelled_gc(terminator)) return;
 591     r = _root_regions_iterator.claim_next();
 592   }
 593 
 594   if (check_and_handle_cancelled_gc(terminator)) return;
 595 
 596   // Normal loop.
 597   q = queues->queue(worker_id);
 598 
 599   ShenandoahTraversalSATBBufferClosure drain_satb(q);
 600   SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
 601 
 602   int seed = 17;
 603 
 604   while (true) {
 605     if (check_and_handle_cancelled_gc(terminator)) return;
 606 
 607     if (DO_SATB) {
 608       while (satb_mq_set.completed_buffers_num() > 0) {
 609         satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
 610       }
 611     }
 612 
 613     if (_arraycopy_task_queue.length() > 0) {
 614       process_arraycopy_task(cl);
 615     }
 616 
 617     uint work = 0;
 618     for (uint i = 0; i < stride; i++) {
 619       if (q->pop_buffer(task) ||
 620           q->pop_local(task) ||
 621           q->pop_overflow(task) ||
 622           queues->steal(worker_id, &seed, task)) {
 623         conc_mark->do_task<T>(q, cl, live_data, &task);
 624         work++;
 625       } else {
 626         break;
 627       }
 628     }
 629 
 630     if (work == 0 &&
 631         _arraycopy_task_queue.length() == 0) {
 632       // No more work, try to terminate
 633       ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 634       if (terminator->offer_termination()) return;
 635     }
 636   }
 637 }
 638 
 639 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator) {
 640   if (_heap->cancelled_gc()) {
 641     ShenandoahCancelledTerminatorTerminator tt;
 642     ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 643     while (! terminator->offer_termination(&tt));
 644     return true;
 645   }
 646   return false;
 647 }
 648 
 649 void ShenandoahTraversalGC::concurrent_traversal_collection() {
 650   ClassLoaderDataGraph::clear_claimed_marks();
 651 
 652   ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
 653   if (!_heap->cancelled_gc()) {
 654     uint nworkers = _heap->workers()->active_workers();
 655     task_queues()->reserve(nworkers);
 656     if (UseShenandoahOWST) {
 657       ShenandoahTaskTerminator terminator(nworkers, task_queues());
 658       ShenandoahConcurrentTraversalCollectionTask traversal_task(&terminator);
 659       _heap->workers()->run_task(&traversal_task);
 660     } else {
 661       ParallelTaskTerminator terminator(nworkers, task_queues());
 662       ShenandoahConcurrentTraversalCollectionTask traversal_task(&terminator);
 663       _heap->workers()->run_task(&traversal_task);
 664     }
 665   }
 666 
 667   if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
 668     ShenandoahEvacOOMScope oom_evac_scope;
 669     preclean_weak_refs();
 670   }
 671 }
 672 
 673 void ShenandoahTraversalGC::final_traversal_collection() {
 674 
 675   _heap->make_parsable(true);
 676 
 677   if (!_heap->cancelled_gc()) {
 678 #if defined(COMPILER2) || INCLUDE_JVMCI
 679     DerivedPointerTable::clear();
 680 #endif
 681     ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
 682     uint nworkers = _heap->workers()->active_workers();
 683     task_queues()->reserve(nworkers);
 684 
 685     // Finish traversal
 686     ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
 687     if (UseShenandoahOWST) {
 688       ShenandoahTaskTerminator terminator(nworkers, task_queues());
 689       ShenandoahFinalTraversalCollectionTask traversal_task(&rp, &terminator);
 690       _heap->workers()->run_task(&traversal_task);
 691     } else {
 692       ParallelTaskTerminator terminator(nworkers, task_queues());
 693       ShenandoahFinalTraversalCollectionTask traversal_task(&rp, &terminator);
 694       _heap->workers()->run_task(&traversal_task);
 695     }
 696 #if defined(COMPILER2) || INCLUDE_JVMCI
 697     DerivedPointerTable::update_pointers();
 698 #endif
 699   }
 700 
 701   if (!_heap->cancelled_gc() && _heap->process_references()) {
 702     weak_refs_work();
 703   }
 704 
 705   if (!_heap->cancelled_gc() && _heap->unload_classes()) {
 706     _heap->unload_classes_and_cleanup_tables(false);
 707     fixup_roots();
 708   }
 709 
 710   if (!_heap->cancelled_gc()) {
 711     // Still good? We can now trash the cset, and make final verification
 712     {
 713       ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
 714       ShenandoahHeapLocker lock(_heap->lock());
 715 
 716       assert(_arraycopy_task_queue.length() == 0, "arraycopy tasks must be done");
 717 
 718       // Trash everything
 719       // Clear immediate garbage regions.
 720       size_t num_regions = _heap->num_regions();
 721 
 722       ShenandoahHeapRegionSet* traversal_regions = traversal_set();
 723       ShenandoahFreeSet* free_regions = _heap->free_set();
 724       free_regions->clear();
 725       for (size_t i = 0; i < num_regions; i++) {
 726         ShenandoahHeapRegion* r = _heap->get_region(i);
 727         bool not_allocated = _heap->next_top_at_mark_start(r->bottom()) == r->top();
 728 
 729         bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
 730         if (r->is_humongous_start() && candidate) {
 731           // Trash humongous.
 732           HeapWord* humongous_obj = r->bottom() + BrooksPointer::word_size();
 733           assert(!_heap->is_marked_next(oop(humongous_obj)), "must not be marked");
 734           r->make_trash();
 735           while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
 736             i++;
 737             r = _heap->get_region(i);
 738             assert(r->is_humongous_continuation(), "must be humongous continuation");
 739             r->make_trash();
 740           }
 741         } else if (!r->is_empty() && candidate) {
 742           // Trash regular.
 743           assert(!r->is_humongous(), "handled above");
 744           assert(!r->is_trash(), "must not already be trashed");
 745           r->make_trash();
 746         }
 747       }
 748       _heap->collection_set()->clear();
 749       _heap->free_set()->rebuild();
 750       reset();
 751     }
 752 
 753     if (ShenandoahVerify) {
 754       _heap->verifier()->verify_after_traversal();
 755     }
 756 
 757     assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
 758     _heap->set_concurrent_traversal_in_progress(false);
 759     assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");
 760   }
 761 }
 762 
 763 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 764 private:
 765 
 766   template <class T>
 767   inline void do_oop_work(T* p) {
 768     T o = RawAccess<>::oop_load(p);
 769     if (!CompressedOops::is_null(o)) {
 770       oop obj = CompressedOops::decode_not_null(o);
 771       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 772       if (!oopDesc::unsafe_equals(obj, forw)) {
 773         RawAccess<OOP_NOT_NULL>::oop_store(p, forw);
 774       }
 775     }
 776   }
 777 public:
 778   inline void do_oop(oop* p) { do_oop_work(p); }
 779   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 780 };
 781 
 782 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 783   ShenandoahRootProcessor* _rp;
 784 public:
 785 
 786   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
 787     AbstractGangTask("Shenandoah traversal fix roots"),
 788     _rp(rp)
 789   {
 790     // Nothing else to do.
 791   }
 792 
 793   void work(uint worker_id) {
 794     ShenandoahTraversalFixRootsClosure cl;
 795     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 796     CLDToOopClosure cldCl(&cl);
 797     _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
 798   }
 799 };
 800 
 801 void ShenandoahTraversalGC::fixup_roots() {
 802 #if defined(COMPILER2) || INCLUDE_JVMCI
 803   DerivedPointerTable::clear();
 804 #endif
 805   ShenandoahHeap* heap = ShenandoahHeap::heap();
 806   ShenandoahRootProcessor rp(heap, heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
 807   ShenandoahTraversalFixRootsTask update_roots_task(&rp);
 808   heap->workers()->run_task(&update_roots_task);
 809 #if defined(COMPILER2) || INCLUDE_JVMCI
 810   DerivedPointerTable::update_pointers();
 811 #endif
 812 }
 813 
 814 void ShenandoahTraversalGC::reset() {
 815   _task_queues->clear();
 816   _arraycopy_task_queue.clear();
 817 }
 818 
// Accessor for the per-worker object-scan queue set used by traversal marking.
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 822 
// Returns the given worker's local liveness accumulation array.
// No bounds check here: callers pass valid worker ids.
jushort* ShenandoahTraversalGC::get_liveness(uint worker_id) {
  return _liveness_local[worker_id];
}
 826 
 827 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 828 private:
 829   ShenandoahHeap* const _heap;
 830 public:
 831   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 832   virtual bool should_return() { return _heap->cancelled_gc(); }
 833 };
 834 
 835 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 836 public:
 837   void do_void() {
 838     ShenandoahHeap* sh = ShenandoahHeap::heap();
 839     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 840     assert(sh->process_references(), "why else would we be here?");
 841     ParallelTaskTerminator terminator(1, traversal_gc->task_queues());
 842     shenandoah_assert_rp_isalive_installed();
 843     traversal_gc->main_loop((uint) 0, &terminator, false);
 844   }
 845 };
 846 
 847 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 848 private:
 849   ShenandoahObjToScanQueue* _queue;
 850   Thread* _thread;
 851   ShenandoahTraversalGC* _traversal_gc;
 852   template <class T>
 853   inline void do_oop_nv(T* p) {
 854     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, false /* matrix */>(p, _thread, _queue, NULL);
 855   }
 856 
 857 public:
 858   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 859     _queue(q), _thread(Thread::current()),
 860     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 861 
 862   void do_oop(narrowOop* p) { do_oop_nv(p); }
 863   void do_oop(oop* p)       { do_oop_nv(p); }
 864 };
 865 
 866 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 867 private:
 868   ShenandoahObjToScanQueue* _queue;
 869   Thread* _thread;
 870   ShenandoahTraversalGC* _traversal_gc;
 871   template <class T>
 872   inline void do_oop_nv(T* p) {
 873     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* matrix */>(p, _thread, _queue, NULL);
 874   }
 875 
 876 public:
 877   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 878     _queue(q), _thread(Thread::current()),
 879     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 880 
 881   void do_oop(narrowOop* p) { do_oop_nv(p); }
 882   void do_oop(oop* p)       { do_oop_nv(p); }
 883 };
 884 
 885 class ShenandoahTraversalKeepAliveUpdateMatrixClosure : public OopClosure {
 886 private:
 887   ShenandoahObjToScanQueue* _queue;
 888   Thread* _thread;
 889   ShenandoahTraversalGC* _traversal_gc;
 890   template <class T>
 891   inline void do_oop_nv(T* p) {
 892     // TODO: Need to somehow pass base_obj here?
 893     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* matrix */>(p, _thread, _queue, NULL);
 894   }
 895 
 896 public:
 897   ShenandoahTraversalKeepAliveUpdateMatrixClosure(ShenandoahObjToScanQueue* q) :
 898     _queue(q), _thread(Thread::current()),
 899     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 900 
 901   void do_oop(narrowOop* p) { do_oop_nv(p); }
 902   void do_oop(oop* p)       { do_oop_nv(p); }
 903 };
 904 
 905 class ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure : public OopClosure {
 906 private:
 907   ShenandoahObjToScanQueue* _queue;
 908   Thread* _thread;
 909   ShenandoahTraversalGC* _traversal_gc;
 910   template <class T>
 911   inline void do_oop_nv(T* p) {
 912     // TODO: Need to somehow pass base_obj here?
 913     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, true /* matrix */>(p, _thread, _queue, NULL);
 914   }
 915 
 916 public:
 917   ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure(ShenandoahObjToScanQueue* q) :
 918     _queue(q), _thread(Thread::current()),
 919     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 920 
 921   void do_oop(narrowOop* p) { do_oop_nv(p); }
 922   void do_oop(oop* p)       { do_oop_nv(p); }
 923 };
 924 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Shortcut if no references were discovered to avoid winding up threads.
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning here runs single-threaded; scoped mutator forces
  // non-MT discovery mode for the duration of this method.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Install the Shenandoah is-alive closure into RP for this scope.
  shenandoah_assert_rp_isalive_not_installed();
  ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

  // Interrupt on cancelled GC
  ShenandoahTraversalCancelledGCYieldClosure yield;

  assert(task_queues()->is_empty(), "Should be empty");
  assert(!sh->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // keep_alive pushes surviving referents on queue 0; complete_gc drains the
  // queues single-threaded. The Matrix variant differs only in the matrix
  // template flag it passes to process_oop().
  ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
  ShenandoahForwardedIsAliveClosure is_alive;
  if (UseShenandoahMatrix) {
    ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  } else {
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  }
  // Queues may legitimately be non-empty only if GC was cancelled mid-preclean.
  assert(!sh->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 971 
 972 // Weak Reference Closures
 973 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 974   uint _worker_id;
 975   ParallelTaskTerminator* _terminator;
 976   bool _reset_terminator;
 977 
 978 public:
 979   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
 980     _worker_id(worker_id),
 981     _terminator(t),
 982     _reset_terminator(reset_terminator) {
 983   }
 984 
 985   void do_void() {
 986     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 987 
 988     ShenandoahHeap* sh = ShenandoahHeap::heap();
 989     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 990     assert(sh->process_references(), "why else would we be here?");
 991     shenandoah_assert_rp_isalive_installed();
 992 
 993     traversal_gc->main_loop(_worker_id, _terminator, false);
 994 
 995     if (_reset_terminator) {
 996       _terminator->reset_for_reuse();
 997     }
 998   }
 999 };
1000 
1001 void ShenandoahTraversalGC::weak_refs_work() {
1002   assert(_heap->process_references(), "sanity");
1003 
1004   ShenandoahHeap* sh = ShenandoahHeap::heap();
1005 
1006   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
1007 
1008   ShenandoahGCPhase phase(phase_root);
1009 
1010   ReferenceProcessor* rp = sh->ref_processor();
1011 
1012   // NOTE: We cannot shortcut on has_discovered_references() here, because
1013   // we will miss marking JNI Weak refs then, see implementation in
1014   // ReferenceProcessor::process_discovered_references.
1015   weak_refs_work_doit();
1016 
1017   rp->verify_no_references_recorded();
1018   assert(!rp->discovery_enabled(), "Post condition");
1019 
1020 }
1021 
// Adapts a ReferenceProcessor ProcessTask to an AbstractGangTask, so
// reference processing can run on the Shenandoah worker gang.
class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {

private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;
public:

  ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    // Guard against evacuation OOM while this worker copies objects.
    ShenandoahEvacOOMScope oom_evac_scope;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);

    // Select the keep-alive closure for the current mode: the four variants
    // differ only in the degen/matrix template flags passed to process_oop().
    ShenandoahForwardedIsAliveClosure is_alive;
    if (UseShenandoahMatrix) {
      if (!heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      }
    } else {
      if (!heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
        _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
      }
    }
  }
};
1062 
// Executor handed to the ReferenceProcessor: runs reference-processing
// tasks on the given work gang via ShenandoahTraversalRefProcTaskProxy.
class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {

private:
  WorkGang* _workers;

public:

  ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    uint nworkers = _workers->active_workers();
    // Make sure the queue set can serve one queue per active worker.
    traversal_gc->task_queues()->reserve(nworkers);
    // Terminator choice: Shenandoah's own terminator when UseShenandoahOWST
    // is set, otherwise the stock ParallelTaskTerminator.
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
      ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    } else {
      ParallelTaskTerminator terminator(nworkers, traversal_gc->task_queues());
      ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    }
  }
};
1093 
// STW weak-reference processing: sets up the reference processor (softref
// policy, MT degree, is-alive closure), then processes discovered references
// and the remaining non-discovered weak oops.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ReferenceProcessor* rp = sh->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  // Install the Shenandoah is-alive closure into RP for this scope.
  shenandoah_assert_rp_isalive_not_installed();
  ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->soft_ref_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(sh->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);

    // The four branches below are identical except for the keep-alive
    // closure type: degen variants during degenerated GC, matrix variants
    // when UseShenandoahMatrix is on. Each processes discovered references
    // first, then the non-discovered weak oops.
    ShenandoahForwardedIsAliveClosure is_alive;
    if (UseShenandoahMatrix) {
      if (!_heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      }
    } else {
      if (!_heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      }
    }

    // Queues may be non-empty only when GC was cancelled during processing.
    assert(!_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
  }
}
1168 
// Enqueue a deferred arraycopy/clone region for later scanning by
// process_arraycopy_task(). A count of 0 denotes a whole-object clone
// task; otherwise count is the number of reference slots at 'start'.
void ShenandoahTraversalGC::push_arraycopy(HeapWord* start, size_t count) {
  _arraycopy_task_queue.push(start, count);
}
1172 
1173 template <class T>
1174 bool ShenandoahTraversalGC::process_arraycopy_task(T* cl) {
1175   ShenandoahArrayCopyTask task = _arraycopy_task_queue.pop();
1176   if (task.start() == NULL) {
1177     return false;
1178   }
1179   if (task.count() == 0) {
1180     // Handle clone.
1181     oop obj = oop(task.start());
1182     obj->oop_iterate(cl);
1183   } else {
1184     HeapWord* array = task.start();
1185     size_t count = task.count();
1186     if (UseCompressedOops) {
1187       narrowOop* p = reinterpret_cast<narrowOop*>(array);
1188       for (size_t i = 0; i < count; i++) {
1189         cl->do_oop(p++);
1190       }
1191     } else {
1192       oop* p = reinterpret_cast<oop*>(array);
1193       for (size_t i = 0; i < count; i++) {
1194         cl->do_oop(p++);
1195       }
1196     }
1197   }
1198   return true;
1199 }