1 /*
   2  * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/gcTraceTime.inline.hpp"
  27 #include "gc/shared/markBitMap.inline.hpp"
  28 #include "gc/shared/referenceProcessor.hpp"
  29 #include "gc/shared/referenceProcessorPhaseTimes.hpp"
  30 #include "gc/shared/workgroup.hpp"
  31 #include "gc/shared/taskqueue.inline.hpp"
  32 #include "gc/shared/weakProcessor.hpp"
  33 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  34 #include "gc/shenandoah/shenandoahConnectionMatrix.hpp"
  35 #include "gc/shenandoah/shenandoahCodeRoots.hpp"
  36 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  37 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  38 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  40 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  41 #include "gc/shenandoah/shenandoahHeap.hpp"
  42 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  43 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp"
  44 #include "gc/shenandoah/shenandoahHeuristics.hpp"
  45 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
  46 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  47 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  48 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  49 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  50 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  51 #include "gc/shenandoah/shenandoahUtils.hpp"
  52 #include "gc/shenandoah/shenandoahVerifier.hpp"
  53 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  54 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  55 
  56 #include "memory/iterator.hpp"
  57 
  58 /**
  59  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  60  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  61  * is incremental-update-based.
  62  *
  63  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  64  * several reasons:
  65  * - We will not reclaim them in this cycle anyway, because they are not in the
  66  *   cset
  67  * - It makes up for the bulk of work during final-pause
  68  * - It also shortens the concurrent cycle because we don't need to
  69  *   pointlessly traverse through newly allocated objects.
  70  * - As a nice side-effect, it solves the I-U termination problem (mutators
  71  *   cannot outrun the GC by allocating like crazy)
  72  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  74  *   achieves the same, but without extra barriers. I think the effect of
  75  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  76  *   particular, we will not see the head of a completely new long linked list
  77  *   in final-pause and end up traversing huge chunks of the heap there.
  78  * - We don't need to see/update the fields of new objects either, because they
  79  *   are either still null, or anything that's been stored into them has been
  80  *   evacuated+enqueued before (and will thus be treated later).
  81  *
 * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  84  *
  85  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
 *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  88  *   them for cset. This means that we need to protect such regions from
  89  *   getting accidentally thrashed at the end of traversal cycle. This is why I
  90  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  91  *   code.
  92  * - We *need* to traverse through evacuated objects. Those objects are
  93  *   pre-existing, and any references in them point to interesting objects that
  94  *   we need to see. We also want to count them as live, because we just
  95  *   determined that they are alive :-) I achieve this by upping TAMS
  96  *   concurrently for every gclab/gc-shared alloc before publishing the
  97  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  99  */
 100 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
 101 private:
 102   ShenandoahObjToScanQueue* _queue;
 103   ShenandoahTraversalGC* _traversal_gc;
 104   ShenandoahHeap* const _heap;
 105 
 106 public:
 107   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 108     _queue(q),
 109     _heap(ShenandoahHeap::heap())
 110  { }
 111 
 112   void do_buffer(void** buffer, size_t size) {
 113     for (size_t i = 0; i < size; ++i) {
 114       oop* p = (oop*) &buffer[i];
 115       oop obj = RawAccess<>::oop_load(p);
 116       shenandoah_assert_not_forwarded(p, obj);
 117       if (_heap->next_marking_context()->mark(obj)) {
 118         _queue->push(ShenandoahMarkTask(obj));
 119       }
 120     }
 121   }
 122 };
 123 
 124 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 125   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 126 
 127  public:
 128   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 129     _satb_cl(satb_cl) {}
 130 
 131   void do_thread(Thread* thread) {
 132     if (thread->is_Java_thread()) {
 133       JavaThread* jt = (JavaThread*)thread;
 134       ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 135     } else if (thread->is_VM_thread()) {
 136       ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 137     }
 138   }
 139 };
 140 
 141 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 142 // and remark them later during final-traversal.
 143 class ShenandoahMarkCLDClosure : public CLDClosure {
 144 private:
 145   OopClosure* _cl;
 146 public:
 147   ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 148   void do_cld(ClassLoaderData* cld) {
 149     cld->oops_do(_cl, true, true);
 150   }
 151 };
 152 
 153 // Like CLDToOopClosure, but only process modified CLDs
 154 class ShenandoahRemarkCLDClosure : public CLDClosure {
 155 private:
 156   OopClosure* _cl;
 157 public:
 158   ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
 159   void do_cld(ClassLoaderData* cld) {
 160     if (cld->has_modified_oops()) {
 161       cld->oops_do(_cl, true, true);
 162     }
 163   }
 164 };
 165 
 166 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
 167 private:
 168   ShenandoahRootProcessor* _rp;
 169   ShenandoahHeap* _heap;
 170 public:
 171   ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp) :
 172     AbstractGangTask("Shenandoah Init Traversal Collection"),
 173     _rp(rp),
 174     _heap(ShenandoahHeap::heap()) {}
 175 
 176   void work(uint worker_id) {
 177     ShenandoahWorkerSession worker_session(worker_id);
 178 
 179     ShenandoahEvacOOMScope oom_evac_scope;
 180     ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
 181     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 182 
 183     bool process_refs = _heap->process_references();
 184     bool unload_classes = _heap->unload_classes();
 185     ReferenceProcessor* rp = NULL;
 186     if (process_refs) {
 187       rp = _heap->ref_processor();
 188     }
 189 
 190     // Step 1: Process ordinary GC roots.
 191     {
 192       ShenandoahTraversalClosure roots_cl(q, rp);
 193       ShenandoahMarkCLDClosure cld_cl(&roots_cl);
 194       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 195       if (unload_classes) {
 196         _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, NULL, NULL, worker_id);
 197         // Need to pre-evac code roots here. Otherwise we might see from-space constants.
 198         ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
 199         ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
 200         ShenandoahAllCodeRootsIterator coderoots = ShenandoahCodeRoots::iterator();
 201         coderoots.possibly_parallel_blobs_do(&code_cl);
 202       } else {
 203         _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
 204       }
 205     }
 206   }
 207 };
 208 
 209 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 210 private:
 211   ParallelTaskTerminator* _terminator;
 212   ShenandoahHeap* _heap;
 213 public:
 214   ShenandoahConcurrentTraversalCollectionTask(ParallelTaskTerminator* terminator) :
 215     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 216     _terminator(terminator),
 217     _heap(ShenandoahHeap::heap()) {}
 218 
 219   void work(uint worker_id) {
 220     ShenandoahWorkerSession worker_session(worker_id);
 221 
 222     ShenandoahEvacOOMScope oom_evac_scope;
 223     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 224 
 225     // Drain all outstanding work in queues.
 226     traversal_gc->main_loop(worker_id, _terminator);
 227   }
 228 };
 229 
// STW task for the final-traversal pause: drains leftover SATB buffers,
// rescans GC roots, and drains the traversal queues to completion.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ParallelTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ParallelTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahWorkerSession worker_session(worker_id);

    // Root scanning and queue draining may evacuate; arm the evac-OOM protocol.
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process ordinary GC roots.
    // Normal cycles use the evacuating traversal closure; degenerated cycles
    // use the non-evacuating degen variant. The two branches are otherwise
    // identical.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // With class unloading, weak CLDs are re-scanned only if they were
        // modified during the concurrent phase (see ShenandoahRemarkCLDClosure).
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
      }
    } else {
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 2: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator);
    }

  }
};
 303 
 304 void ShenandoahTraversalGC::flush_liveness(uint worker_id) {
 305   jushort* ld = get_liveness(worker_id);
 306   for (uint i = 0; i < _heap->num_regions(); i++) {
 307     ShenandoahHeapRegion* r = _heap->get_region(i);
 308     jushort live = ld[i];
 309     if (live > 0) {
 310       r->increase_live_data_gc_words(live);
 311       ld[i] = 0;
 312     }
 313   }
 314 }
 315 
 316 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 317   _heap(heap),
 318   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
 319   _traversal_set(ShenandoahHeapRegionSet()),
 320   _root_regions(ShenandoahHeapRegionSet()),
 321   _root_regions_iterator(&_root_regions),
 322   _matrix(heap->connection_matrix()) {
 323 
 324   uint num_queues = heap->max_workers();
 325   for (uint i = 0; i < num_queues; ++i) {
 326     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 327     task_queue->initialize();
 328     _task_queues->register_queue(i, task_queue);
 329   }
 330 
 331   uint workers = heap->max_workers();
 332   _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
 333   for (uint worker = 0; worker < workers; worker++) {
 334      _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, num_regions, mtGC);
 335   }
 336 
 337 }
 338 
 339 ShenandoahTraversalGC::~ShenandoahTraversalGC() {
 340 }
 341 
 342 void ShenandoahTraversalGC::prepare_regions() {
 343   ShenandoahHeap* heap = ShenandoahHeap::heap();
 344   size_t num_regions = heap->num_regions();
 345   ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();
 346   ShenandoahMarkingContext* const ctx = _heap->next_marking_context();
 347   for (size_t i = 0; i < num_regions; i++) {
 348     ShenandoahHeapRegion* region = heap->get_region(i);
 349     if (heap->is_bitmap_slice_committed(region)) {
 350       if (_traversal_set.is_in(i)) {
 351         ctx->set_top_at_mark_start(region->region_number(), region->top());
 352         region->clear_live_data();
 353         assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
 354       } else {
 355         // Everything outside the traversal set is always considered live.
 356         ctx->set_top_at_mark_start(region->region_number(), region->bottom());
 357       }
 358       if (_root_regions.is_in(i)) {
 359         assert(!region->in_collection_set(), "roots must not overlap with cset");
 360         matrix->clear_region_outbound(i);
 361         // Since root region can be allocated at, we should bound the scans
 362         // in it at current top. Otherwise, one thread may evacuate objects
 363         // to that root region, while another would try to scan newly evac'ed
 364         // objects under the race.
 365         region->set_concurrent_iteration_safe_limit(region->top());
 366       }
 367     } else {
 368       // FreeSet may contain uncommitted empty regions, once they are recommitted,
 369       // their TAMS may have old values, so reset them here.
 370       ctx->set_top_at_mark_start(region->region_number(), region->bottom());
 371     }
 372   }
 373 }
 374 
 375 void ShenandoahTraversalGC::prepare() {
 376   _heap->collection_set()->clear();
 377   assert(_heap->collection_set()->count() == 0, "collection set not clear");
 378 
 379   if (UseTLAB) {
 380     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_accumulate_stats);
 381     _heap->accumulate_statistics_tlabs();
 382   }
 383 
 384   {
 385     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
 386     _heap->make_parsable(true);
 387   }
 388 
 389   if (UseTLAB) {
 390     ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
 391     _heap->resize_tlabs();
 392   }
 393 
 394   assert(_heap->next_marking_context()->is_bitmap_clear(), "need clean mark bitmap");
 395 
 396   ShenandoahFreeSet* free_set = _heap->free_set();
 397   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 398 
 399   // Find collection set
 400   _heap->heuristics()->choose_collection_set(collection_set);
 401   prepare_regions();
 402 
 403   // Rebuild free set
 404   free_set->rebuild();
 405 
 406   log_info(gc,ergo)("Got " SIZE_FORMAT " collection set regions and " SIZE_FORMAT " root set regions", collection_set->count(), _root_regions.count());
 407 }
 408 
// Runs the init-traversal safepoint: verifies (optionally), prepares regions
// and the collection set under the heap lock, enables reference discovery,
// and seeds the traversal queues from the GC roots with the worker gang.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  {
    // Choose cset and set per-region TAMS; requires the heap lock.
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(false);
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      // Scan GC roots with all active workers to seed the traversal queues.
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      ShenandoahInitTraversalCollectionTask traversal_task(&rp);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }

  // Re-arm the root-region iterator for the concurrent phase.
  _root_regions_iterator.reset(&_root_regions);
}
 460 
// Per-worker traversal driver. Zeroes this worker's liveness buffer, then
// dispatches to main_loop_work() with the statically-typed closure matching
// the current mode: matrix tracking on/off x normal/degenerated cycle x
// class unloading (metadata) on/off x string dedup on/off. The closure type
// is a template parameter so the hot loop avoids virtual dispatch. Finally
// flushes the accumulated liveness into the shared region counters.
void ShenandoahTraversalGC::main_loop(uint w, ParallelTaskTerminator* t) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = get_liveness(w);
  Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  if (UseShenandoahMatrix) {
    // Closures that also record inter-region connections in the matrix.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataMatrixClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMatrixClosure>(&cl, ld, w, t);
        }
      }
    } else {
      // Degenerated cycle variants (non-evacuating).
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenMatrixClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenMatrixClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalDegenMatrixClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenMatrixClosure>(&cl, ld, w, t);
        }
      }
    }
  } else {
    // No connection-matrix updates.
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t);
        }
      }
    } else {
      // Degenerated cycle variants (non-evacuating).
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t);
        }
      }
    }
  }
  // Publish this worker's liveness data into the shared region counters.
  flush_liveness(w);

}
 552 
// Core traversal loop, templatized over the concrete traversal closure type
// (avoids virtual dispatch in the hot path). Phases:
//   1. drain any outstanding claimed queues,
//   2. scan all root regions,
//   3. steady-state loop draining SATB buffers, arraycopy tasks and the
//      worker's own queue (with stealing) until global termination.
// Bails out cooperatively through the terminator whenever GC is cancelled.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator* terminator) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrentMark();

  // Only re-check for cancellation every `stride` tasks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Step 1: Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield()) {
      // Cancelled: coordinate shutdown with the other workers, then leave.
      ShenandoahCancelledTerminatorTerminator tt;
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator)) return;

  // Step 2: Process all root regions.
  // TODO: Interleave this in the normal mark loop below.
  ShenandoahHeapRegion* r = _root_regions_iterator.claim_next();
  while (r != NULL) {
    _heap->marked_object_oop_safe_iterate(r, cl);
    if (ShenandoahPacing) {
      _heap->pacer()->report_partial(r->get_live_data_words());
    }
    if (check_and_handle_cancelled_gc(terminator)) return;
    r = _root_regions_iterator.claim_next();
  }

  if (check_and_handle_cancelled_gc(terminator)) return;

  // Step 3: Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  // Seed for work-stealing randomization.
  int seed = 17;

  while (true) {
    if (check_and_handle_cancelled_gc(terminator)) return;

    // Drain SATB buffers completed by mutators since the last pass.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    if (_arraycopy_task_queue.length() > 0) {
      process_arraycopy_task(cl);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task) ||
          queues->steal(worker_id, &seed, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0 &&
        _arraycopy_task_queue.length() == 0) {
      // No more work, try to terminate
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      if (terminator->offer_termination()) return;
    }
  }
}
 643 
 644 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator) {
 645   if (_heap->cancelled_gc()) {
 646     ShenandoahCancelledTerminatorTerminator tt;
 647     ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 648     while (! terminator->offer_termination(&tt));
 649     return true;
 650   }
 651   return false;
 652 }
 653 
// Runs the concurrent traversal phase with the active worker gang, then
// (optionally) precleans weak references if the cycle was not cancelled.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    // Terminator type depends on the OWST flag; the task body is identical.
    // The terminator must outlive the task run, hence the per-branch scoping.
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentTraversalCollectionTask task(&terminator);
      _heap->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentTraversalCollectionTask task(&terminator);
      _heap->workers()->run_task(&task);
    }
  }

  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    // Precleaning may evacuate through reference fields; arm the evac-OOM scope.
    ShenandoahEvacOOMScope oom_evac_scope;
    preclean_weak_refs();
  }
}
 679 
// Finishes the traversal cycle at a safepoint: completes marking/evacuation
// via the final-traversal task, processes weak references, unloads classes
// if requested, then trashes fully-dead regions and rebuilds the free set.
// Each step is skipped once the GC has been cancelled; a degenerated or full
// GC will pick up from there.
void ShenandoahTraversalGC::final_traversal_collection() {

  // Retire remaining GC/TLABs so the heap is parsable for the final scans.
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    // Terminator type depends on the OWST flag; the task body is identical.
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
      _heap->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
      _heap->workers()->run_task(&task);
    }
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc() && _heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(false);
    // Class unloading may have changed roots; update them.
    fixup_roots();
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      assert(_arraycopy_task_queue.length() == 0, "arraycopy tasks must be done");

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->next_marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // A region that allocated during the cycle (top moved past TAMS) must
        // not be trashed: its new objects were never counted as live.
        bool not_allocated = ctx->top_at_mark_start(r->region_number()) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + BrooksPointer::word_size();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash();
          // Trash the whole humongous object: all its continuation regions.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }
  }
}
 776 
 777 class ShenandoahTraversalFixRootsClosure : public OopClosure {
 778 private:
 779 
 780   template <class T>
 781   inline void do_oop_work(T* p) {
 782     T o = RawAccess<>::oop_load(p);
 783     if (!CompressedOops::is_null(o)) {
 784       oop obj = CompressedOops::decode_not_null(o);
 785       oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
 786       if (!oopDesc::unsafe_equals(obj, forw)) {
 787         RawAccess<IS_NOT_NULL>::oop_store(p, forw);
 788       }
 789     }
 790   }
 791 public:
 792   inline void do_oop(oop* p) { do_oop_work(p); }
 793   inline void do_oop(narrowOop* p) { do_oop_work(p); }
 794 };
 795 
 796 class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
 797   ShenandoahRootProcessor* _rp;
 798 public:
 799 
 800   ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
 801     AbstractGangTask("Shenandoah traversal fix roots"),
 802     _rp(rp)
 803   {
 804     // Nothing else to do.
 805   }
 806 
 807   void work(uint worker_id) {
 808     ShenandoahTraversalFixRootsClosure cl;
 809     MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
 810     CLDToOopClosure cldCl(&cl);
 811     _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
 812   }
 813 };
 814 
 815 void ShenandoahTraversalGC::fixup_roots() {
 816 #if defined(COMPILER2) || INCLUDE_JVMCI
 817   DerivedPointerTable::clear();
 818 #endif
 819   ShenandoahHeap* heap = ShenandoahHeap::heap();
 820   ShenandoahRootProcessor rp(heap, heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
 821   ShenandoahTraversalFixRootsTask update_roots_task(&rp);
 822   heap->workers()->run_task(&update_roots_task);
 823 #if defined(COMPILER2) || INCLUDE_JVMCI
 824   DerivedPointerTable::update_pointers();
 825 #endif
 826 }
 827 
 828 void ShenandoahTraversalGC::reset() {
 829   _task_queues->clear();
 830   _arraycopy_task_queue.clear();
 831 }
 832 
 833 ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
 834   return _task_queues;
 835 }
 836 
 837 jushort* ShenandoahTraversalGC::get_liveness(uint worker_id) {
 838   return _liveness_local[worker_id];
 839 }
 840 
 841 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 842 private:
 843   ShenandoahHeap* const _heap;
 844 public:
 845   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 846   virtual bool should_return() { return _heap->cancelled_gc(); }
 847 };
 848 
 849 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 850 public:
 851   void do_void() {
 852     ShenandoahHeap* sh = ShenandoahHeap::heap();
 853     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 854     assert(sh->process_references(), "why else would we be here?");
 855     ParallelTaskTerminator terminator(1, traversal_gc->task_queues());
 856     shenandoah_assert_rp_isalive_installed();
 857     traversal_gc->main_loop((uint) 0, &terminator);
 858   }
 859 };
 860 
 861 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
 862 private:
 863   ShenandoahObjToScanQueue* _queue;
 864   Thread* _thread;
 865   ShenandoahTraversalGC* _traversal_gc;
 866   ShenandoahMarkingContext* const _mark_context;
 867 
 868   template <class T>
 869   inline void do_oop_work(T* p) {
 870     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, false /* matrix */>(p, _thread, _queue, _mark_context, NULL);
 871   }
 872 
 873 public:
 874   ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 875     _queue(q), _thread(Thread::current()),
 876     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 877     _mark_context(ShenandoahHeap::heap()->next_marking_context()) {}
 878 
 879   void do_oop(narrowOop* p) { do_oop_work(p); }
 880   void do_oop(oop* p)       { do_oop_work(p); }
 881 };
 882 
 883 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
 884 private:
 885   ShenandoahObjToScanQueue* _queue;
 886   Thread* _thread;
 887   ShenandoahTraversalGC* _traversal_gc;
 888   ShenandoahMarkingContext* const _mark_context;
 889 
 890   template <class T>
 891   inline void do_oop_work(T* p) {
 892     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, false /* matrix */>(p, _thread, _queue, _mark_context, NULL);
 893   }
 894 
 895 public:
 896   ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
 897     _queue(q), _thread(Thread::current()),
 898     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 899     _mark_context(ShenandoahHeap::heap()->next_marking_context()) {}
 900 
 901   void do_oop(narrowOop* p) { do_oop_work(p); }
 902   void do_oop(oop* p)       { do_oop_work(p); }
 903 };
 904 
 905 class ShenandoahTraversalKeepAliveUpdateMatrixClosure : public OopClosure {
 906 private:
 907   ShenandoahObjToScanQueue* _queue;
 908   Thread* _thread;
 909   ShenandoahTraversalGC* _traversal_gc;
 910   ShenandoahMarkingContext* const _mark_context;
 911 
 912   template <class T>
 913   inline void do_oop_work(T* p) {
 914     // TODO: Need to somehow pass base_obj here?
 915     _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */, true /* matrix */>(p, _thread, _queue, _mark_context, NULL);
 916   }
 917 
 918 public:
 919   ShenandoahTraversalKeepAliveUpdateMatrixClosure(ShenandoahObjToScanQueue* q) :
 920     _queue(q), _thread(Thread::current()),
 921     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 922     _mark_context(ShenandoahHeap::heap()->next_marking_context()) {}
 923 
 924   void do_oop(narrowOop* p) { do_oop_work(p); }
 925   void do_oop(oop* p)       { do_oop_work(p); }
 926 };
 927 
 928 class ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure : public OopClosure {
 929 private:
 930   ShenandoahObjToScanQueue* _queue;
 931   Thread* _thread;
 932   ShenandoahTraversalGC* _traversal_gc;
 933   ShenandoahMarkingContext* const _mark_context;
 934 
 935   template <class T>
 936   inline void do_oop_work(T* p) {
 937     // TODO: Need to somehow pass base_obj here?
 938     _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */, true /* matrix */>(p, _thread, _queue, _mark_context, NULL);
 939   }
 940 
 941 public:
 942   ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure(ShenandoahObjToScanQueue* q) :
 943     _queue(q), _thread(Thread::current()),
 944     _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 945     _mark_context(ShenandoahHeap::heap()->next_marking_context()) {}
 946 
 947   void do_oop(narrowOop* p) { do_oop_work(p); }
 948   void do_oop(oop* p)       { do_oop_work(p); }
 949 };
 950 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Shortcut if no references were discovered to avoid winding up threads.
  if (!rp->has_discovered_references()) {
    return;
  }

  // Turn off MT discovery for the duration of this scope; the mutator's
  // destructor restores the previous setting.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Install the forwarding-aware is-alive closure for this scope
  // (again restored by the mutator's destructor on exit).
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  // Interrupt on cancelled GC
  ShenandoahTraversalCancelledGCYieldClosure yield;

  assert(task_queues()->is_empty(), "Should be empty");
  assert(!sh->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Both branches use queue 0 and the single-threaded complete-GC
  // closure; the keep-alive flavor depends only on the connection
  // matrix setting. ResourceMark scopes any RP-internal allocations.
  ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
  if (UseShenandoahMatrix) {
    ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  } else {
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  }
  // Queues may be non-empty only if the cycle was cancelled mid-preclean.
  assert(sh->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}
 997 
 998 // Weak Reference Closures
 999 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
1000   uint _worker_id;
1001   ParallelTaskTerminator* _terminator;
1002   bool _reset_terminator;
1003 
1004 public:
1005   ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
1006     _worker_id(worker_id),
1007     _terminator(t),
1008     _reset_terminator(reset_terminator) {
1009   }
1010 
1011   void do_void() {
1012     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1013 
1014     ShenandoahHeap* sh = ShenandoahHeap::heap();
1015     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
1016     assert(sh->process_references(), "why else would we be here?");
1017     shenandoah_assert_rp_isalive_installed();
1018 
1019     traversal_gc->main_loop(_worker_id, _terminator);
1020 
1021     if (_reset_terminator) {
1022       _terminator->reset_for_reuse();
1023     }
1024   }
1025 };
1026 
1027 void ShenandoahTraversalGC::weak_refs_work() {
1028   assert(_heap->process_references(), "sanity");
1029 
1030   ShenandoahHeap* sh = ShenandoahHeap::heap();
1031 
1032   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
1033 
1034   ShenandoahGCPhase phase(phase_root);
1035 
1036   ReferenceProcessor* rp = sh->ref_processor();
1037 
1038   // NOTE: We cannot shortcut on has_discovered_references() here, because
1039   // we will miss marking JNI Weak refs then, see implementation in
1040   // ReferenceProcessor::process_discovered_references.
1041   weak_refs_work_doit();
1042 
1043   rp->verify_no_references_recorded();
1044   assert(!rp->discovery_enabled(), "Post condition");
1045 
1046 }
1047 
1048 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
1049 
1050 private:
1051   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
1052   ParallelTaskTerminator* _terminator;
1053 public:
1054 
1055   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
1056                              ParallelTaskTerminator* t) :
1057     AbstractGangTask("Process reference objects in parallel"),
1058     _proc_task(proc_task),
1059     _terminator(t) {
1060   }
1061 
1062   void work(uint worker_id) {
1063     ShenandoahEvacOOMScope oom_evac_scope;
1064     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1065     ShenandoahHeap* heap = ShenandoahHeap::heap();
1066     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
1067 
1068     ShenandoahForwardedIsAliveClosure is_alive;
1069     if (UseShenandoahMatrix) {
1070       if (!heap->is_degenerated_gc_in_progress()) {
1071         ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1072         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1073       } else {
1074         ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1075         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1076       }
1077     } else {
1078       if (!heap->is_degenerated_gc_in_progress()) {
1079         ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1080         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1081       } else {
1082         ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
1083         _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
1084       }
1085     }
1086   }
1087 };
1088 
1089 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
1090 
1091 private:
1092   WorkGang* _workers;
1093 
1094 public:
1095 
1096   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) :
1097     _workers(workers) {
1098   }
1099 
1100   // Executes a task using worker threads.
1101   void execute(ProcessTask& task, uint ergo_workers) {
1102     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
1103 
1104     ShenandoahHeap* heap = ShenandoahHeap::heap();
1105     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
1106     ShenandoahPushWorkerQueuesScope scope(_workers,
1107                                           traversal_gc->task_queues(),
1108                                           ergo_workers,
1109                                           /* do_check = */ false);
1110     uint nworkers = _workers->active_workers();
1111     traversal_gc->task_queues()->reserve(nworkers);
1112     if (UseShenandoahOWST) {
1113       ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1114       ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1115       _workers->run_task(&proc_task_proxy);
1116     } else {
1117       ParallelTaskTerminator terminator(nworkers, traversal_gc->task_queues());
1118       ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
1119       _workers->run_task(&proc_task_proxy);
1120     }
1121   }
1122 };
1123 
// STW weak reference processing for traversal GC: installs the
// forwarding-aware is-alive closure, configures softref policy and MT
// degree, then runs the reference processor with mode-specific
// keep-alive closures, followed by WeakProcessor cleanup.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ReferenceProcessor* rp = sh->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  // Install the is-alive closure for the duration of this method; the
  // mutator's destructor uninstalls it on exit.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->soft_ref_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  // Parallel phases are dispatched through this executor onto the gang.
  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(sh->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination);

    // Prepare for single-threaded mode
    ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
    ShenandoahEvacOOMScope oom_scope;

    // NOTE(review): this declaration shadows the is_alive installed via the
    // mutator above; both are the same closure type, so the calls below
    // behave identically either way.
    ShenandoahForwardedIsAliveClosure is_alive;
    // Four-way dispatch on GC mode: (matrix?, degenerated?) selects the
    // keep-alive closure flavor; the processing sequence is identical.
    if (UseShenandoahMatrix) {
      if (!_heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenMatrixClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      }
    } else {
      if (!_heap->is_degenerated_gc_in_progress()) {
        ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      } else {
        ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
        rp->process_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &executor,
                                          &pt);
        pt.print_all_references();
        WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
      }
    }

    // Queues may be non-empty only if the cycle was cancelled.
    assert(!_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
  }
}
1204 
1205 void ShenandoahTraversalGC::push_arraycopy(HeapWord* start, size_t count) {
1206   _arraycopy_task_queue.push(start, count);
1207 }
1208 
1209 template <class T>
1210 bool ShenandoahTraversalGC::process_arraycopy_task(T* cl) {
1211   ShenandoahArrayCopyTask task = _arraycopy_task_queue.pop();
1212   if (task.start() == NULL) {
1213     return false;
1214   }
1215   if (task.count() == 0) {
1216     // Handle clone.
1217     oop obj = oop(task.start());
1218     obj->oop_iterate(cl);
1219   } else {
1220     HeapWord* array = task.start();
1221     size_t count = task.count();
1222     if (UseCompressedOops) {
1223       narrowOop* p = reinterpret_cast<narrowOop*>(array);
1224       for (size_t i = 0; i < count; i++) {
1225         cl->do_oop(p++);
1226       }
1227     } else {
1228       oop* p = reinterpret_cast<oop*>(array);
1229       for (size_t i = 0; i < count; i++) {
1230         cl->do_oop(p++);
1231       }
1232     }
1233   }
1234   return true;
1235 }