1 /*
   2  * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/gcTraceTime.inline.hpp"
  27 #include "gc/shared/markBitMap.inline.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shared/taskqueue.inline.hpp"
  32 #include "gc/shared/weakProcessor.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
  35 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  37 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  38 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  39 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  40 #include "gc/shenandoah/shenandoahHeap.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  42 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  44 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  45 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  46 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  47 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  48 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  49 #include "gc/shenandoah/shenandoahUtils.hpp"
  50 #include "gc/shenandoah/shenandoahVerifier.hpp"
  51 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  52 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  53 
  54 #include "memory/iterator.hpp"
  55 
  56 /**
  57  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  58  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  59  * is incremental-update-based.
  60  *
  61  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  62  * several reasons:
  63  * - We will not reclaim them in this cycle anyway, because they are not in the
  64  *   cset
  65  * - It makes up for the bulk of work during final-pause
  66  * - It also shortens the concurrent cycle because we don't need to
  67  *   pointlessly traverse through newly allocated objects.
  68  * - As a nice side-effect, it solves the I-U termination problem (mutators
  69  *   cannot outrun the GC by allocating like crazy)
  70  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  72  *   achieves the same, but without extra barriers. I think the effect of
  73  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  74  *   particular, we will not see the head of a completely new long linked list
  75  *   in final-pause and end up traversing huge chunks of the heap there.
  76  * - We don't need to see/update the fields of new objects either, because they
  77  *   are either still null, or anything that's been stored into them has been
  78  *   evacuated+enqueued before (and will thus be treated later).
  79  *
  80  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  82  *
  83  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  85  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  86  *   them for cset. This means that we need to protect such regions from
  87  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  88  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  89  *   code.
  90  * - We *need* to traverse through evacuated objects. Those objects are
  91  *   pre-existing, and any references in them point to interesting objects that
  92  *   we need to see. We also want to count them as live, because we just
  93  *   determined that they are alive :-) I achieve this by upping TAMS
  94  *   concurrently for every gclab/gc-shared alloc before publishing the
  95  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  97  */
  98 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  99 private:
 100   ShenandoahObjToScanQueue* _queue;
 101   ShenandoahTraversalGC* _traversal_gc;
 102   ShenandoahHeap* const _heap;
 103 
 104 public:
 105   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 106     _queue(q),
 107     _heap(ShenandoahHeap::heap())
 108  { }
 109 
 110   void do_buffer(void** buffer, size_t size) {
 111     for (size_t i = 0; i < size; ++i) {
 112       oop* p = (oop*) &buffer[i];
 113       oop obj = RawAccess<>::oop_load(p);
 114       shenandoah_assert_not_forwarded(p, obj);
 115       if (_heap->mark_next(obj)) {
 116         _queue->push(ShenandoahMarkTask(obj));
 117       }
 118     }
 119   }
 120 };
 121 
 122 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 123   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 124 
 125  public:
 126   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 127     _satb_cl(satb_cl) {}
 128 
 129   void do_thread(Thread* thread) {
 130     if (thread->is_Java_thread()) {
 131       JavaThread* jt = (JavaThread*)thread;
 132       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 133     } else if (thread->is_VM_thread()) {
 134       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 135     }
 136   }
 137 };
 138 
 139 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 140 // and remark them later during final-traversal.
 141 class ShenandoahMarkCLDClosure : public CLDClosure {
 142 private:
 143   OopClosure* _cl;
 144 public:
 145   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 146   void do_cld(ClassLoaderData* cld) {
 147     cld->oops_do(_cl, true, true);
 148   }
 149 };
 150 
 151 // Like CLDToOopClosure, but only process modified CLDs
 152 class ShenandoahRemarkCLDClosure : public CLDClosure {
 153 private:
 154   OopClosure* _cl;
 155 public:
 156   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 157   void do_cld(ClassLoaderData* cld) {
 158     if (cld->has_modified_oops()) {
 159       cld->oops_do(_cl, true, true);
 160     }
 161   }
 162 };
 163 
// Parallel task for the init-traversal pause: scans GC roots with the
// traversal closure, seeding the per-worker task queues for the
// subsequent concurrent traversal phase.
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahHeap* _heap;
public:
  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      // With reference processing on, discovered refs go through the
      // reference processor instead of being treated as strong (see the
      // NULL-vs-closure selection for the weak-root slot below).
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      if (unload_classes) {
        // With class unloading, only strong roots are scanned here; the weak
        // CLD slot is passed as NULL (modified CLDs are remarked at final
        // traversal — see ShenandoahFinalTraversalCollectionTask).
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &code_cl, NULL, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
      }
    }
  }
};
 201 
 202 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 203 private:
 204   ParallelTaskTerminator* _terminator;
 205   ShenandoahHeap* _heap;
 206 public:
 207   ShenandoahConcurrentTraversalCollectionTask(ParallelTaskTerminator* terminator) :
 208     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 209     _terminator(terminator),
 210     _heap(ShenandoahHeap::heap()) {}
 211 
 212   void work(uint worker_id) {
 213     ShenandoahWorkerSession worker_session(worker_id);
 214 
 215     ShenandoahEvacOOMScope oom_evac_scope;
 216     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 217 
 218     // Drain all outstanding work in queues.
 219     traversal_gc->main_loop(worker_id, _terminator);
 220   }
 221 };
 222 
// Parallel task for the final-traversal pause: drains completed SATB
// buffers, re-scans GC roots (piggy-backing per-thread SATB drain on the
// thread scan), then finishes all remaining queue work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ParallelTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ParallelTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process ordinary GC roots.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      // The thread closure empties each thread's SATB queue while it is visited.
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Only CLDs modified since init traversal need re-scanning (weak slot).
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
      }
    } else {
      // Degenerated GC path: same structure, but uses the degen closure variant.
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 3: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator);
    }

  }
};
 296 
 297 void ShenandoahTraversalGC::flush_liveness(uint worker_id) {
 298   jushort* ld = get_liveness(worker_id);
 299   for (uint i = 0; i < _heap->num_regions(); i++) {
 300     ShenandoahHeapRegion* r = _heap->get_region(i);
 301     jushort live = ld[i];
 302     if (live > 0) {
 303       r->increase_live_data_gc_words(live);
 304       ld[i] = 0;
 305     }
 306   }
 307 }
 308 
 309 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 310   _heap(heap),
 311   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 312   _traversal_set(ShenandoahHeapRegionSet()),
 313   _root_regions(ShenandoahHeapRegionSet()),
 314   _root_regions_iterator(&_root_regions),
 315   _matrix(heap->connection_matrix()) {
 316 
 317   uint num_queues = heap->max_workers();
 318   for (uint i = 0; i < num_queues; ++i) {
 319     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 320     task_queue->initialize();
 321     _task_queues->register_queue(i, task_queue);
 322   }
 323 
 324   uint workers = heap->max_workers();
 325   _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
 326   for (uint worker = 0; worker < workers; worker++) {
 327      _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, num_regions, mtGC);
 328   }
 329 
 330 }
 331 
 332 ShenandoahTraversalGC::~ShenandoahTraversalGC() {
 333 }
 334 
// Set up per-region state before the traversal cycle. Regions in the
// traversal set get TAMS == top (later allocations are implicitly live,
// see file header) and cleared live data; all other regions get
// TAMS == bottom so their entire contents count as live. Root regions
// additionally get their outbound matrix cleared and a safe iteration
// limit bounded at current top.
void ShenandoahTraversalGC::prepare_regions() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();
  size_t num_regions = heap->num_regions();
  ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();

  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = heap->get_region(i);
    if (heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        heap->set_next_top_at_mark_start(region->bottom(), region->top());
        region->clear_live_data();
        assert(heap->is_next_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        heap->set_next_top_at_mark_start(region->bottom(), region->bottom());
      }
      if (_root_regions.is_in(i)) {
        assert(!region->in_collection_set(), "roots must not overlap with cset");
        matrix->clear_region_outbound(i);
        // Since root region can be allocated at, we should bound the scans
        // in it at current top. Otherwise, one thread may evacuate objects
        // to that root region, while another would try to scan newly evac'ed
        // objects under the race.
        region->set_concurrent_iteration_safe_limit(region->top());
      }
    }
  }
}
 363 
// Prepare for a traversal cycle (called under the heap lock at the init
// pause): clear the old collection set, make the heap parsable, choose a
// new collection set, set up region TAMS, and rebuild the free set.
// Requires a clean next-mark bitmap.
void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  // Retire TLABs so the heap is walkable.
  _heap->make_parsable(true);

  assert(_heap->is_next_bitmap_clear(), "need clean mark bitmap");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc,ergo)("Got " SIZE_FORMAT " collection set regions and " SIZE_FORMAT " root set regions", collection_set->count(), _root_regions.count());
}
 384 
// Init-traversal safepoint: optionally verify, prepare regions/cset under
// the heap lock, enable reference discovery, and run the parallel root
// scan that seeds the task queues for the concurrent phase.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(false);
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      ShenandoahInitTraversalCollectionTask traversal_task(&rp);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }

  // Workers claim root regions from this iterator during the concurrent phase.
  _root_regions_iterator.reset(&_root_regions);
}
 436 
// Traversal worker entry point. Selects one statically-typed closure over
// the cross product of: connection matrix on/off, degenerated GC or not,
// class unloading (metadata) or not, and string dedup or not — then runs
// main_loop_work specialized on that closure type, so the hot loop stays
// monomorphic. Per-worker liveness is zeroed before and flushed after.
void ShenandoahTraversalGC::main_loop(uint w, ParallelTaskTerminator* t) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = get_liveness(w);
  Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  if (UseShenandoahMatrix) {
    // Matrix-updating closure variants.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataMatrixClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMatrixClosure>(&cl, ld, w, t);
        }
      }
    } else {
      // Degenerated (in-pause) variants.
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenMatrixClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenMatrixClosure>(&cl, ld, w, t);
        }
      }
    }
  } else {
    // Non-matrix closure variants.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t);
        }
      }
    } else {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t);
        }
      }
    }
  }
  // Publish this worker's accumulated per-region liveness.
  flush_liveness(w);

}
 528 
// Core traversal loop, specialized on closure type T. Work order:
// (1) drain any previously populated queues, (2) scan claimed root
// regions, (3) steady-state loop that drains completed SATB buffers,
// pending arraycopy tasks, and queue entries (own or stolen) until the
// terminator signals global completion. Cancellation is checked between
// strides; on cancellation the worker rendezvouses via the cancelled
// terminator and returns.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator* terminator) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrentMark();

  // Number of tasks to process between cancellation checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield()) {
      // Cancelled: spin in the termination protocol until all workers agree.
      ShenandoahCancelledTerminatorTerminator tt;
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      // Pop order: buffer, then local queue, then overflow.
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator)) return;

  // Step 2: Process all root regions.
  // TODO: Interleave this in the normal mark loop below.
  ShenandoahHeapRegion* r = _root_regions_iterator.claim_next();
  while (r != NULL) {
    _heap->marked_object_oop_safe_iterate(r, cl);
    if (ShenandoahPacing) {
      _heap->pacer()->report_partial(r->get_live_data_words());
    }
    if (check_and_handle_cancelled_gc(terminator)) return;
    r = _root_regions_iterator.claim_next();
  }

  if (check_and_handle_cancelled_gc(terminator)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  // Seed for randomized victim selection when stealing.
  int seed = 17;

  while (true) {
    if (check_and_handle_cancelled_gc(terminator)) return;

    // Drain SATB buffers completed by mutators since the last pass.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    if (_arraycopy_task_queue.length() > 0) {
      process_arraycopy_task(cl);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      // Prefer own work (buffer/local/overflow), then steal from other queues.
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task) ||
          queues->steal(worker_id, &seed, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0 &&
        _arraycopy_task_queue.length() == 0) {
      // No more work, try to terminate
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      if (terminator->offer_termination()) return;
    }
  }
}
 619 
 620 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator) {
 621   if (_heap->cancelled_gc()) {
 622     ShenandoahCancelledTerminatorTerminator tt;
 623     ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 624     while (! terminator->offer_termination(&tt));
 625     return true;
 626   }
 627   return false;
 628 }
 629 
// Concurrent traversal phase: workers drain the queues seeded at
// init-traversal, claim and scan root regions, and follow references
// until termination (or cancellation). Optionally precleans weak
// references afterwards.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    if (UseShenandoahOWST) {
      // Optimized work-stealing terminator variant.
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentTraversalCollectionTask task(&terminator);
      _heap->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentTraversalCollectionTask task(&terminator);
      _heap->workers()->run_task(&task);
    }
  }

  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    ShenandoahEvacOOMScope oom_evac_scope;
    preclean_weak_refs();
  }
}
 655 
// Final-traversal pause: finish all traversal work, process weak
// references, unload classes (fixing up roots afterwards), then trash
// fully-dead regions and rebuild the free set. Any cancellation
// short-circuits the remaining steps.
void ShenandoahTraversalGC::final_traversal_collection() {

  // Retire TLABs so the heap is walkable.
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
      _heap->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
      _heap->workers()->run_task(&task);
    }
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc() && _heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(false);
    // Unloading may leave roots pointing at from-space copies; update them.
    fixup_roots();
  }

  if (!_heap->cancelled_gc()) {
    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      assert(_arraycopy_task_queue.length() == 0, "arraycopy tasks must be done");

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // Regions allocated into during the cycle (top moved past TAMS) hold
        // implicitly-live but uncounted objects and must not be trashed
        // (see the alloc-region note in the file header comment).
        bool not_allocated = _heap->next_top_at_mark_start(r->bottom()) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + BrooksPointer::word_size();
          assert(!_heap->is_marked_next(oop(humongous_obj)), "must not be marked");
          r->make_trash();
          // Trash all continuation regions of this humongous object as well.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }
  }
}
 751 
 752 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 753 private:
 754 
 755   template <class T>
 756   inline void do_oop_work(T* p) {
 757     T o = RawAccess<>::oop_load(p);
 758     if (!CompressedOops::is_null(o)) {
 759       oop obj = CompressedOops::decode_not_null(o);
 760       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 761       if (!oopDesc::unsafe_equals(obj, forw)) {
 762         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 763       }
 764     }
 765   }
 766 public:
 767   inline void do_oop(oop* p) { do_oop_work(p); }
 768   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 769 };
 770 
 771 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 772   ShenandoahRootProcessor* _rp;
 773 public:
 774 
 775   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
 776     AbstractGangTask("Shenandoah traversal fix roots"),
 777     _rp(rp)
 778   {
 779     // Nothing else to do.
 780   }
 781 
 782   void work(uint worker_id) {
 783     ShenandoahTraversalFixRootsClosure cl;
 784     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 785     CLDToOopClosure cldCl(&cl);
 786     _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
 787   }
 788 };
 789 
 790 void ShenandoahTraversalGC::fixup_roots() {
 791 #if defined(COMPILER2) || INCLUDE_JVMCI
 792   DerivedPointerTable::clear();
 793 #endif
 794   ShenandoahHeap* heap = ShenandoahHeap::heap();
 795   ShenandoahRootProcessor rp(heap, heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
 796   ShenandoahTraversalFixRootsTask update_roots_task(&rp);
 797   heap->workers()->run_task(&update_roots_task);
 798 #if defined(COMPILER2) || INCLUDE_JVMCI
 799   DerivedPointerTable::update_pointers();
 800 #endif
 801 }
 802 
 803 void ShenandoahTraversalGC::reset() {
 804   _task_queues->clear();
 805   _arraycopy_task_queue.clear();
 806 }
 807 
// Accessor for the set of per-worker traversal task queues.
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 811 
// Returns the given worker's local liveness array (indexed by worker id;
// caller must pass a valid worker id).
jushort* ShenandoahTraversalGC::get_liveness(uint worker_id) {
  return _liveness_local[worker_id];
}
 815 
 816 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 817 private:
 818   ShenandoahHeap* const _heap;
 819 public:
 820   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 821   virtual bool should_return() { return _heap->cancelled_gc(); }
 822 };
 823 
 824 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 825 public:
 826   void do_void() {
 827     ShenandoahHeap* sh = ShenandoahHeap::heap();
 828     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 829     assert(sh->process_references(), "why else would we be here?");
 830     ParallelTaskTerminator terminator(1, traversal_gc->task_queues());
 831     shenandoah_assert_rp_isalive_installed();
 832     traversal_gc->main_loop((uint) 0, &terminator);
 833   }
 834 };
 835 
 836 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 837 private:
 838   ShenandoahObjToScanQueue* _queue;
 839   Thread* _thread;
 840   ShenandoahTraversalGC* _traversal_gc;
 841   template <class T>
 842   inline void do_oop_work(T* p) {
 843     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, false /* matrix */>(p, _thread, _queue, NULL);
 844   }
 845 
 846 public:
 847   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 848     _queue(q), _thread(Thread::current()),
 849     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 850 
 851   void do_oop(narrowOop* p) { do_oop_work(p); }
 852   void do_oop(oop* p)       { do_oop_work(p); }
 853 };
 854 
 855 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 856 private:
 857   ShenandoahObjToScanQueue* _queue;
 858   Thread* _thread;
 859   ShenandoahTraversalGC* _traversal_gc;
 860   template <class T>
 861   inline void do_oop_work(T* p) {
 862     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* matrix */>(p, _thread, _queue, NULL);
 863   }
 864 
 865 public:
 866   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 867     _queue(q), _thread(Thread::current()),
 868     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 869 
 870   void do_oop(narrowOop* p) { do_oop_work(p); }
 871   void do_oop(oop* p)       { do_oop_work(p); }
 872 };
 873 
 874 class ShenandoahTraversalKeepAliveUpdateMatrixClosure : public OopClosure {
 875 private:
 876   ShenandoahObjToScanQueue* _queue;
 877   Thread* _thread;
 878   ShenandoahTraversalGC* _traversal_gc;
 879   template <class T>
 880   inline void do_oop_work(T* p) {
 881     // TODO: Need to somehow pass base_obj here?
 882     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* matrix */>(p, _thread, _queue, NULL);
 883   }
 884 
 885 public:
 886   ShenandoahTraversalKeepAliveUpdateMatrixClosure(ShenandoahObjToScanQueue* q) :
 887     _queue(q), _thread(Thread::current()),
 888     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 889 
 890   void do_oop(narrowOop* p) { do_oop_work(p); }
 891   void do_oop(oop* p)       { do_oop_work(p); }
 892 };
 893 
 894 class ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure : public OopClosure {
 895 private:
 896   ShenandoahObjToScanQueue* _queue;
 897   Thread* _thread;
 898   ShenandoahTraversalGC* _traversal_gc;
 899   template <class T>
 900   inline void do_oop_work(T* p) {
 901     // TODO: Need to somehow pass base_obj here?
 902     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, true /* matrix */>(p, _thread, _queue, NULL);
 903   }
 904 
 905 public:
 906   ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure(ShenandoahObjToScanQueue* q) :
 907     _queue(q), _thread(Thread::current()),
 908     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}
 909 
 910   void do_oop(narrowOop* p) { do_oop_work(p); }
 911   void do_oop(oop* p)       { do_oop_work(p); }
 912 };
 913 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Shortcut if no references were discovered to avoid winding up threads.
  if (!rp->has_discovered_references()) {
    return;
  }

  // Scoped mutator: precleaning below is driven single-threaded (queue 0),
  // so switch RP to single-threaded discovery for the duration.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Scoped mutator: install our is-alive closure into RP for this phase.
  shenandoah_assert_rp_isalive_not_installed();
  ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

  // Interrupt on cancelled GC
  ShenandoahTraversalCancelledGCYieldClosure yield;

  assert(task_queues()->is_empty(), "Should be empty");
  assert(!sh->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Both branches are identical except for the keep_alive closure flavor,
  // which is selected by whether the connection matrix is maintained.
  ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
  ShenandoahForwardedIsAliveClosure is_alive;
  if (UseShenandoahMatrix) {
    ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  } else {
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  }
  // Queues may legitimately be non-empty only if the cycle was cancelled
  // mid-preclean (the yield closure returned early).
  assert(sh->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 960 
 961 // Weak Reference Closures
 962 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
 963   uint _worker_id;
 964   ParallelTaskTerminator* _terminator;
 965   bool _reset_terminator;
 966 
 967 public:
 968   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
 969     _worker_id(worker_id),
 970     _terminator(t),
 971     _reset_terminator(reset_terminator) {
 972   }
 973 
 974   void do_void() {
 975     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 976 
 977     ShenandoahHeap* sh = ShenandoahHeap::heap();
 978     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 979     assert(sh->process_references(), "why else would we be here?");
 980     shenandoah_assert_rp_isalive_installed();
 981 
 982     traversal_gc->main_loop(_worker_id, _terminator);
 983 
 984     if (_reset_terminator) {
 985       _terminator->reset_for_reuse();
 986     }
 987   }
 988 };
 989 
 990 void ShenandoahTraversalGC::weak_refs_work() {
 991   assert(_heap->process_references(), "sanity");
 992 
 993   ShenandoahHeap* sh = ShenandoahHeap::heap();
 994 
 995   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
 996 
 997   ShenandoahGCPhase phase(phase_root);
 998 
 999   ReferenceProcessor* rp = sh->ref_processor();
1000 
1001   // NOTE: We cannot shortcut on has_discovered_references() here, because
1002   // we will miss marking JNI Weak refs then, see implementation in
1003   // ReferenceProcessor::process_discovered_references.
1004   weak_refs_work_doit();
1005 
1006   rp->verify_no_references_recorded();
1007   assert(!rp->discovery_enabled(), "Post condition");
1008 
1009 }
1010 
1011 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
1012 
1013 private:
1014   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1015   ParallelTaskTerminator* _terminator;
1016 public:
1017 
1018   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1019                              ParallelTaskTerminator* t) :
1020     AbstractGangTask("Process reference objects in parallel"),
1021     _proc_task(proc_task),
1022     _terminator(t) {
1023   }
1024 
1025   void work(uint worker_id) {
1026     ShenandoahEvacOOMScope oom_evac_scope;
1027     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1028     ShenandoahHeap* heap = ShenandoahHeap::heap();
1029     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1030 
1031     ShenandoahForwardedIsAliveClosure is_alive;
1032     if (UseShenandoahMatrix) {
1033       if (!heap->is_degenerated_gc_in_progress()) {
1034         ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1035         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1036       } else {
1037         ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1038         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1039       }
1040     } else {
1041       if (!heap->is_degenerated_gc_in_progress()) {
1042         ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1043         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1044       } else {
1045         ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1046         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1047       }
1048     }
1049   }
1050 };
1051 
1052 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1053 
1054 private:
1055   WorkGang* _workers;
1056 
1057 public:
1058 
1059   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) :
1060     _workers(workers) {
1061   }
1062 
1063   // Executes a task using worker threads.
1064   void execute(ProcessTask& task, uint ergo_workers) {
1065     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1066 
1067     ShenandoahHeap* heap = ShenandoahHeap::heap();
1068     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1069     ShenandoahPushWorkerScope scope(_workers,
1070                                     ergo_workers,
1071                                     /* do_check = */ false);
1072     uint nworkers = _workers->active_workers();
1073     traversal_gc->task_queues()->reserve(nworkers);
1074     if (UseShenandoahOWST) {
1075       ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1076       ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1077       _workers->run_task(&proc_task_proxy);
1078     } else {
1079       ParallelTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1080       ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1081       _workers->run_task(&proc_task_proxy);
1082     }
1083   }
1084 };
1085 
// Performs the actual weak-reference processing: configures the reference
// processor (softref policy, MT degree, is-alive closure), then runs
// process_discovered_references plus WeakProcessor cleanup, selecting the
// keep-alive closure flavor by matrix / degenerated-GC mode.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ReferenceProcessor* rp = sh->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  // Scoped mutator: install our is-alive closure into RP for this phase.
  shenandoah_assert_rp_isalive_not_installed();
  ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->soft_ref_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(sh->gc_timer(), rp->num_queues());

  {
    // Time the processing step and track termination separately.
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination);

    // The four branches below differ only in the keep_alive closure flavor,
    // selected by matrix maintenance and degenerated-GC mode; the RP call,
    // timing printout, and WeakProcessor pass are identical.
    ShenandoahForwardedIsAliveClosure is_alive;
    if (UseShenandoahMatrix) {
      if (!_heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      }
    } else {
      if (!_heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      }
    }

    // Queues may be non-empty only if the cycle was cancelled mid-processing.
    assert(!_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
  }
}
1161 
// Records an arraycopy destination range starting at `start` with `count`
// oop slots for later re-scanning; count == 0 marks a whole-object clone
// (see process_arraycopy_task).
void ShenandoahTraversalGC::push_arraycopy(HeapWord* start, size_t count) {
  _arraycopy_task_queue.push(start, count);
}
1165 
1166 template <class T>
1167 bool ShenandoahTraversalGC::process_arraycopy_task(T* cl) {
1168   ShenandoahArrayCopyTask task = _arraycopy_task_queue.pop();
1169   if (task.start() == NULL) {
1170     return false;
1171   }
1172   if (task.count() == 0) {
1173     // Handle clone.
1174     oop obj = oop(task.start());
1175     obj->oop_iterate(cl);
1176   } else {
1177     HeapWord* array = task.start();
1178     size_t count = task.count();
1179     if (UseCompressedOops) {
1180       narrowOop* p = reinterpret_cast<narrowOop*>(array);
1181       for (size_t i = 0; i < count; i++) {
1182         cl->do_oop(p++);
1183       }
1184     } else {
1185       oop* p = reinterpret_cast<oop*>(array);
1186       for (size_t i = 0; i < count; i++) {
1187         cl->do_oop(p++);
1188       }
1189     }
1190   }
1191   return true;
1192 }