1 /*
   2  * Copyright (c) 2018, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/gcTraceTime.inline.hpp"
  27 #include "gc/shared/markBitMap.inline.hpp"
  28 #include "gc/shared/workgroup.hpp"
  29 #include "gc/shared/taskqueue.inline.hpp"
  30 #include "gc/shared/weakProcessor.hpp"
  31 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  32 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  33 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  34 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  35 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  36 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  37 #include "gc/shenandoah/shenandoahHeap.hpp"
  38 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  39 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  40 #include "gc/shenandoah/shenandoahTraversalGC.hpp"
  41 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  42 #include "gc/shenandoah/shenandoahStringDedup.hpp"
  43 #include "gc/shenandoah/shenandoahStrDedupQueue.inline.hpp"
  44 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  45 #include "gc/shenandoah/shenandoahUtils.hpp"
  46 #include "gc/shenandoah/shenandoahVerifier.hpp"
  47 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  48 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  49 
  50 #include "memory/iterator.hpp"
  51 
  52 /**
  53  * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm.
  54  * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm
  55  * is incremental-update-based.
  56  *
  57  * NOTE on interaction with TAMS: we want to avoid traversing new objects for
  58  * several reasons:
  59  * - We will not reclaim them in this cycle anyway, because they are not in the
  60  *   cset
  61  * - It makes up for the bulk of work during final-pause
  62  * - It also shortens the concurrent cycle because we don't need to
  63  *   pointlessly traverse through newly allocated objects.
  64  * - As a nice side-effect, it solves the I-U termination problem (mutators
  65  *   cannot outrun the GC by allocating like crazy)
  66  * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
  68  *   achieves the same, but without extra barriers. I think the effect of
  69  *   shortened final-pause (mentioned above) is the main advantage of MWF. In
  70  *   particular, we will not see the head of a completely new long linked list
  71  *   in final-pause and end up traversing huge chunks of the heap there.
  72  * - We don't need to see/update the fields of new objects either, because they
  73  *   are either still null, or anything that's been stored into them has been
  74  *   evacuated+enqueued before (and will thus be treated later).
  75  *
  76  * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
  78  *
  79  * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
  81  *   them alive. Otherwise the next cycle wouldn't pick them up and consider
  82  *   them for cset. This means that we need to protect such regions from
 *   getting accidentally trashed at the end of the traversal cycle. This is why I
  84  *   keep track of alloc-regions and check is_alloc_region() in the trashing
  85  *   code.
  86  * - We *need* to traverse through evacuated objects. Those objects are
  87  *   pre-existing, and any references in them point to interesting objects that
  88  *   we need to see. We also want to count them as live, because we just
  89  *   determined that they are alive :-) I achieve this by upping TAMS
  90  *   concurrently for every gclab/gc-shared alloc before publishing the
  91  *   evacuated object. This way, the GC threads will not consider such objects
 *   implicitly marked, and traverse through them as normal.
  93  */
  94 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure {
  95 private:
  96   ShenandoahObjToScanQueue* _queue;
  97   ShenandoahTraversalGC* _traversal_gc;
  98   ShenandoahHeap* _heap;
  99 public:
 100   ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) :
 101     _queue(q), _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
 102     _heap(ShenandoahHeap::heap())
 103  { }
 104 
 105   void do_buffer(void** buffer, size_t size) {
 106     for (size_t i = 0; i < size; ++i) {
 107       oop* p = (oop*) &buffer[i];
 108       oop obj = oopDesc::load_heap_oop(p);
 109       shenandoah_assert_not_forwarded(p, obj);
 110       if (!_heap->is_marked_next(obj) && _heap->mark_next(obj)) {
 111         _queue->push(ShenandoahMarkTask(obj));
 112       }
 113     }
 114   }
 115 };
 116 
 117 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure {
 118   ShenandoahTraversalSATBBufferClosure* _satb_cl;
 119 
 120  public:
 121   ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) :
 122     _satb_cl(satb_cl) {}
 123 
 124   void do_thread(Thread* thread) {
 125     if (thread->is_Java_thread()) {
 126       JavaThread* jt = (JavaThread*)thread;
 127       jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
 128     } else if (thread->is_VM_thread()) {
 129       JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 130     }
 131   }
 132 };
 133 
 134 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal
 135 // and remark them later during final-traversal.
class ShenandoahMarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;  // underlying oop closure applied to the CLD's oops
public:
  ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  // Unconditionally visit all oops of the CLD; per the note above, the second
  // boolean also clears the modified-oops state so that subsequent stores can
  // be recorded and re-marked during final-traversal.
  void do_cld(ClassLoaderData* cld) {
    cld->oops_do(_cl, true, true);
  }
};
 145 
 146 // Like CLDToOopClosure, but only process modified CLDs
class ShenandoahRemarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;  // underlying oop closure applied to the CLD's oops
public:
  ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  // Re-visit only CLDs that recorded oop stores since the initial mark
  // (has_modified_oops); untouched CLDs are skipped entirely.
  void do_cld(ClassLoaderData* cld) {
    if (cld->has_modified_oops()) {
      cld->oops_do(_cl, true, true);
    }
  }
};
 158 
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahHeap* _heap;
public:
  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()) {}

  // Per-worker init work: reset this worker's liveness counters and seed its
  // traversal queue by scanning GC roots.
  void work(uint worker_id) {
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    // Initialize live data.
    // NOTE(review): no fill value is passed — presumably Copy::fill_to_bytes
    // defaults to zero; confirm against its declaration.
    jushort* ld = _heap->traversal_gc()->get_liveness(worker_id);
    Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));

    bool process_refs = _heap->shenandoahPolicy()->process_references();
    bool unload_classes = _heap->shenandoahPolicy()->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      // With class unloading only strong roots are scanned here; otherwise all
      // roots. The second closure slot is NULL whenever reference processing
      // is on — presumably so those roots are left to the ReferenceProcessor;
      // confirm against ShenandoahRootProcessor's parameter meaning.
      if (unload_classes) {
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &code_cl, NULL, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
      }
    }
  }
};
 197 
 198 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
 199 private:
 200   ParallelTaskTerminator* _terminator;
 201   ShenandoahHeap* _heap;
 202 public:
 203   ShenandoahConcurrentTraversalCollectionTask(ParallelTaskTerminator* terminator) :
 204     AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
 205     _terminator(terminator),
 206     _heap(ShenandoahHeap::heap()) {}
 207 
 208   void work(uint worker_id) {
 209     ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();
 210     ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
 211     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 212 
 213     // Drain all outstanding work in queues.
 214     traversal_gc->main_loop(worker_id, _terminator, true);
 215   }
 216 };
 217 
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ParallelTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ParallelTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  // Per-worker final (STW) work: drain leftover SATB buffers, rescan GC
  // roots, drain the traversal queues to completion, then flush per-worker
  // liveness counters into the regions.
  void work(uint worker_id) {
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->shenandoahPolicy()->process_references();
    bool unload_classes = _heap->shenandoahPolicy()->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Per-thread SATB buffers are drained in Step 2 via the thread closure.
    }

    // Step 2: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      // Drains each visited thread's SATB buffer during the root scan.
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Weak CLD roots: only CLDs with recorded oop stores are re-scanned.
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, &code_cl, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 3: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

    // Flush remaining liveness data.
    traversal_gc->flush_liveness(worker_id);

  }
};
 280 
 281 void ShenandoahTraversalGC::flush_liveness(uint worker_id) {
 282   jushort* ld = get_liveness(worker_id);
 283   for (uint i = 0; i < _heap->regions()->active_regions(); i++) {
 284     ShenandoahHeapRegion* r = _heap->regions()->get(i);
 285     jushort live = ld[i];
 286     if (live > 0) {
 287       r->increase_live_data_words(live);
 288       ld[i] = 0;
 289     }
 290   }
 291 }
 292 
 293 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
 294   _heap(heap),
 295   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())) {
 296 
 297   uint num_queues = heap->max_workers();
 298   for (uint i = 0; i < num_queues; ++i) {
 299     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 300     task_queue->initialize();
 301     _task_queues->register_queue(i, task_queue);
 302   }
 303 
 304   uint workers = heap->max_workers();
 305   _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
 306   for (uint worker = 0; worker < workers; worker++) {
 307      _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, num_regions, mtGC);
 308   }
 309 
 310 }
 311 
 312 ShenandoahTraversalGC::~ShenandoahTraversalGC() {
 313 }
 314 
 315 void ShenandoahTraversalGC::prepare() {
 316   _heap->collection_set()->clear();
 317   assert(_heap->collection_set()->count() == 0, "collection set not clear");
 318 
 319   _heap->make_tlabs_parsable(true);
 320 
 321   assert(_heap->is_next_bitmap_clear(), "need clean mark bitmap");
 322 
 323   ShenandoahHeapRegionSet* regions = _heap->regions();
 324   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 325   size_t num_regions = _heap->num_regions();
 326 
 327   // Find collection set
 328   _heap->shenandoahPolicy()->choose_collection_set(collection_set, false);
 329 
 330   // Rebuild free set
 331   ShenandoahFreeSet* _free_regions = _heap->free_regions();
 332   _free_regions->clear();
 333 
 334   for (uint from_idx = 0; from_idx < num_regions; from_idx++) {
 335     ShenandoahHeapRegion* r = regions->get(from_idx);
 336     if (r->is_alloc_allowed()) {
 337       _free_regions->add_region(r);
 338     }
 339   }
 340 
 341   log_info(gc,ergo)("Got "SIZE_FORMAT" collection set regions", collection_set->count());
 342 }
 343 
// Initial (STW) pause of the traversal cycle: choose the cset, enable
// reference discovery if configured, and seed worker queues from GC roots.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  // Choose collection set and rebuild free set, under the heap lock.
  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->shenandoahPolicy()->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(false);
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    // Seed the traversal queues by scanning GC roots with all active workers.
    // The two branches differ only in terminator flavor (OWST vs. standard).
    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      if (UseShenandoahOWST) {
        ShenandoahTaskTerminator terminator(nworkers, task_queues());
        ShenandoahInitTraversalCollectionTask traversal_task(&rp);
        _heap->workers()->run_task(&traversal_task);
      } else {
        ParallelTaskTerminator terminator(nworkers, task_queues());
        ShenandoahInitTraversalCollectionTask traversal_task(&rp);
        _heap->workers()->run_task(&traversal_task);
      }
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
    // If the cycle was cancelled during the root scan, restore root
    // consistency, drop queued work, and leave traversal mode.
    if (_heap->cancelled_concgc()) {
      _heap->fixup_roots();
      reset();
      _heap->set_concurrent_traversal_in_progress(false);
    }
  }
}
 400 
 401 void ShenandoahTraversalGC::main_loop(uint worker_id, ParallelTaskTerminator* terminator, bool do_satb) {
 402   if (do_satb) {
 403     main_loop_prework<true>(worker_id, terminator);
 404   } else {
 405     main_loop_prework<false>(worker_id, terminator);
 406   }
 407 }
 408 
 409 template <bool DO_SATB>
 410 void ShenandoahTraversalGC::main_loop_prework(uint w, ParallelTaskTerminator* t) {
 411   ShenandoahObjToScanQueue* q = task_queues()->queue(w);
 412   jushort* ld = get_liveness(w);
 413 
 414   ReferenceProcessor* rp = NULL;
 415   if (_heap->shenandoahPolicy()->process_references()) {
 416     rp = _heap->ref_processor();
 417   }
 418   if (_heap->shenandoahPolicy()->unload_classes()) {
 419     if (ShenandoahStringDedup::is_enabled()) {
 420       ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 421       ShenandoahTraversalMetadataDedupClosure cl(q, rp, dq);
 422       main_loop_work<ShenandoahTraversalMetadataDedupClosure, DO_SATB>(&cl, ld, w, t);
 423     } else {
 424       ShenandoahTraversalMetadataClosure cl(q, rp);
 425       main_loop_work<ShenandoahTraversalMetadataClosure, DO_SATB>(&cl, ld, w, t);
 426     }
 427   } else {
 428     if (ShenandoahStringDedup::is_enabled()) {
 429       ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 430       ShenandoahTraversalDedupClosure cl(q, rp, dq);
 431       main_loop_work<ShenandoahTraversalDedupClosure, DO_SATB>(&cl, ld, w, t);
 432     } else {
 433       ShenandoahTraversalClosure cl(q, rp);
 434       main_loop_work<ShenandoahTraversalClosure, DO_SATB>(&cl, ld, w, t);
 435     }
 436   }
 437 }
 438 
// Core traversal work loop. First claims and drains any unclaimed queues,
// then settles into the usual pop-local/steal loop until termination.
// T is the traversal closure type; when DO_SATB is set, completed SATB
// buffers are also drained into the worker's queue.
template <class T, bool DO_SATB>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator* terminator) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrentMark();

  // Number of tasks processed between cancellation/termination checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_concgc_and_yield()) {
      // Cancelled: run the cancelled-terminator protocol until all workers
      // have agreed to terminate, then bail out.
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task)) {
        conc_mark->do_task<T, true>(q, cl, live_data, &task);
      } else {
        // Claimed queue exhausted; move on to the next unclaimed one.
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  // Normal loop.
  q = queues->queue(worker_id);
  ShenandoahTraversalSATBBufferClosure satb_cl(q);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  int seed = 17;

  while (true) {
    if (check_and_handle_cancelled_gc(terminator)) return;

    for (uint i = 0; i < stride; i++) {
      // Pop order: buffer, local, overflow, then (if DO_SATB) refill the
      // queue from a completed SATB buffer, and finally steal from others.
      if ((q->pop_buffer(task) ||
           q->pop_local(task) ||
           q->pop_overflow(task) ||
           (DO_SATB && satb_mq_set.apply_closure_to_completed_buffer(&satb_cl) && q->pop_buffer(task)) ||
           queues->steal(worker_id, &seed, task))) {
        conc_mark->do_task<T, true>(q, cl, live_data, &task);
      } else {
        if (terminator->offer_termination()) return;
      }
    }
  }
}
 493 
 494 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator) {
 495   if (_heap->cancelled_concgc()) {
 496     ShenandoahCancelledTerminatorTerminator tt;
 497     while (! terminator->offer_termination(&tt));
 498     return true;
 499   }
 500   return false;
 501 }
 502 
// Concurrent phase of the traversal cycle: workers drain traversal queues
// (including SATB buffers) alongside mutators, optionally precleaning weak
// references at the end.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_concgc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    // The two branches differ only in terminator flavor (OWST vs. standard).
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentTraversalCollectionTask traversal_task(&terminator);
      _heap->workers()->run_task(&traversal_task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentTraversalCollectionTask traversal_task(&terminator);
      _heap->workers()->run_task(&traversal_task);
    }
  }

  if (!_heap->cancelled_concgc() && ShenandoahPreclean && _heap->shenandoahPolicy()->process_references()) {
    preclean_weak_refs();
  }

  // On cancellation, drop remaining work so the post-condition below holds.
  if (_heap->cancelled_concgc()) {
    _task_queues->clear();
  }
  assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
}
 530 
// Final (STW) pause of the traversal cycle: finish the traversal, process
// weak references and class unloading if configured, then trash immediately
// reclaimable regions and rebuild the free set.
void ShenandoahTraversalGC::final_traversal_collection() {

  // Make TLABs parsable for the region walks below.
  _heap->make_tlabs_parsable(true);

  if (!_heap->cancelled_concgc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    // The two branches differ only in terminator flavor (OWST vs. standard).
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalTraversalCollectionTask traversal_task(&rp, &terminator);
      _heap->workers()->run_task(&traversal_task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalTraversalCollectionTask traversal_task(&rp, &terminator);
      _heap->workers()->run_task(&traversal_task);
    }
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_concgc() && _heap->shenandoahPolicy()->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_concgc() && _heap->shenandoahPolicy()->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(false);
    _heap->concurrentMark()->update_roots(ShenandoahPhaseTimings::final_traversal_update_roots);
  }

  if (!_heap->cancelled_concgc()) {
    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      ShenandoahHeapRegionSet* regions = _heap->regions();
      size_t active = regions->active_regions();
      ShenandoahFreeSet* free_regions = _heap->free_regions();
      free_regions->clear();
      for (size_t i = 0; i < active; i++) {
        ShenandoahHeapRegion* r = regions->get(i);
        // not_allocated: nothing was allocated in this region after TAMS was
        // set (top == TAMS). Regions with post-TAMS allocations contain
        // implicitly-live new objects (see file header) and must not be
        // trashed even if they have no counted live data.
        bool not_allocated = _heap->next_top_at_mark_start(r->bottom()) == r->top();
        if (r->is_humongous_start() && !r->has_live() && not_allocated) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + BrooksPointer::word_size();
          assert(!_heap->is_marked_next(oop(humongous_obj)), "must not be marked");
          r->make_trash();
          // Also trash the trailing continuation regions of this object.
          while (i + 1 < active && regions->get(i + 1)->is_humongous_continuation()) {
            i++;
            r = regions->get(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash();
          }
        } else if (!r->is_empty() && !r->has_live() && not_allocated) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash();
        } else if (r->is_alloc_allowed()) {
          free_regions->add_region(r);
        }
      }
      _heap->collection_set()->clear();
      reset();
    }

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }
  } else {
    // On cancellation path, fixup roots to make them consistent
    _heap->fixup_roots();
    reset();
  }

  assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
  _heap->set_concurrent_traversal_in_progress(false);
}
 619 
// Drop all outstanding traversal work (used on cancellation and at cycle end).
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}
 623 
// Accessor for the traversal task queue set (one queue per max worker,
// registered in the constructor).
ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}
 627 
 628 jushort* ShenandoahTraversalGC::get_liveness(uint worker_id) {
 629   return _liveness_local[worker_id];
 630 }
 631 
 632 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
 633 private:
 634   ShenandoahHeap* const _heap;
 635 public:
 636   ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 637   virtual bool should_return() { return _heap->cancelled_concgc(); }
 638 };
 639 
 640 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
 641 public:
 642   void do_void() {
 643     ShenandoahHeap* sh = ShenandoahHeap::heap();
 644     ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
 645     assert(sh->shenandoahPolicy()->process_references(), "why else would we be here?");
 646     ReferenceProcessor* rp = sh->ref_processor();
 647     ParallelTaskTerminator terminator(1, traversal_gc->task_queues());
 648     ReferenceProcessorIsAliveMutator fix_alive(rp, sh->is_alive_closure());
 649     traversal_gc->main_loop((uint) 0, &terminator, false);
 650   }
 651 };
 652 
// Keep-alive closure used during reference processing: hands each oop slot
// to ShenandoahTraversalGC::process_oop with string dedup disabled.
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;   // this worker's traversal queue
  Thread* _thread;                    // captured at construction time
  ShenandoahTraversalGC* _traversal_gc;
  template <class T>
  inline void do_oop_nv(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */>(p, _thread, _queue);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};
 671 
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->shenandoahPolicy()->process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Shortcut if no references were discovered to avoid winding up threads.
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning runs single-threaded (see the worker-0 closures below), so
  // force single-threaded discovery for the duration of this phase.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
  ReferenceProcessorIsAliveMutator fix_alive(rp, sh->is_alive_closure());

  // Interrupt on cancelled GC
  ShenandoahTraversalCancelledGCYieldClosure yield;

  assert(task_queues()->is_empty(), "Should be empty");

  ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
  ShenandoahForwardedIsAliveClosure is_alive;
  ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(0));
  ResourceMark rm;
  rp->preclean_discovered_references(&is_alive, &keep_alive,
                                     &complete_gc, &yield,
                                     NULL);
  // Queues may be non-empty only if the phase was interrupted by cancellation.
  assert(!sh->cancelled_concgc() || task_queues()->is_empty(), "Should be empty");
}
 707 
 708 // Weak Reference Closures
// Complete-GC closure for reference processing: drains the traversal queues
// for one worker and flushes its liveness data. Optionally resets the shared
// terminator afterwards so it can be reused for the next processing round.
class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ParallelTaskTerminator* _terminator;
  bool _reset_terminator;   // reset terminator for reuse after draining

public:
  ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->shenandoahPolicy()->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();
    ReferenceProcessorIsAliveMutator fix_alive(rp, sh->is_alive_closure());

    // Drain this worker's queues (no SATB draining in this STW phase).
    traversal_gc->main_loop(_worker_id, _terminator, false);
    traversal_gc->flush_liveness(_worker_id);

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};
 738 
 739 void ShenandoahTraversalGC::weak_refs_work() {
 740   assert(_heap->shenandoahPolicy()->process_references(), "sanity");
 741 
 742   ShenandoahHeap* sh = ShenandoahHeap::heap();
 743 
 744   ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;
 745 
 746   ShenandoahGCPhase phase(phase_root);
 747 
 748   ReferenceProcessor* rp = sh->ref_processor();
 749 
 750   // NOTE: We cannot shortcut on has_discovered_references() here, because
 751   // we will miss marking JNI Weak refs then, see implementation in
 752   // ReferenceProcessor::process_discovered_references.
 753   weak_refs_work_doit();
 754 
 755   rp->verify_no_references_recorded();
 756   assert(!rp->discovery_enabled(), "Post condition");
 757 
 758 }
 759 
 760 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
 761 
 762 private:
 763   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
 764   ParallelTaskTerminator* _terminator;
 765 public:
 766 
 767   ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
 768                              ParallelTaskTerminator* t) :
 769     AbstractGangTask("Process reference objects in parallel"),
 770     _proc_task(proc_task),
 771     _terminator(t) {
 772   }
 773 
 774   void work(uint worker_id) {
 775     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 776     ShenandoahHeap* heap = ShenandoahHeap::heap();
 777     ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);
 778 
 779     ShenandoahForwardedIsAliveClosure is_alive;
 780     ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
 781     _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
 782   }
 783 };
 784 
 785 class ShenandoahTraversalRefEnqueueTaskProxy : public AbstractGangTask {
 786 
 787 private:
 788   AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;
 789 
 790 public:
 791 
 792   ShenandoahTraversalRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
 793     AbstractGangTask("Enqueue reference objects in parallel"),
 794     _enqueue_task(enqueue_task) {
 795   }
 796 
 797   void work(uint worker_id) {
 798     _enqueue_task.work(worker_id);
 799   }
 800 };
 801 
 802 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
 803 
 804 private:
 805   WorkGang* _workers;
 806 
 807 public:
 808 
 809   ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) :
 810     _workers(workers) {
 811   }
 812 
 813   // Executes a task using worker threads.
 814   void execute(ProcessTask& task) {
 815     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 816 
 817     // Shortcut execution if task is empty.
 818     // This should be replaced with the generic ReferenceProcessor shortcut,
 819     // see JDK-8181214, JDK-8043575, JDK-6938732.
 820     if (task.is_empty()) {
 821       return;
 822     }
 823 
 824     ShenandoahHeap* heap = ShenandoahHeap::heap();
 825     ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
 826     uint nworkers = _workers->active_workers();
 827     traversal_gc->task_queues()->reserve(nworkers);
 828     if (UseShenandoahOWST) {
 829       ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
 830       ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
 831       _workers->run_task(&proc_task_proxy);
 832     } else {
 833       ParallelTaskTerminator terminator(nworkers, traversal_gc->task_queues());
 834       ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
 835       _workers->run_task(&proc_task_proxy);
 836     }
 837   }
 838 
 839   void execute(EnqueueTask& task) {
 840     ShenandoahTraversalRefEnqueueTaskProxy enqueue_task_proxy(task);
 841     _workers->run_task(&enqueue_task_proxy);
 842   }
 843 };
 844 
// Core of weak-reference processing for the traversal GC: configures the
// ReferenceProcessor, processes discovered references (parallel via the
// executor, or serially via the shared closures below), handles other weak
// oop roots, and finally enqueues the processed references.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ReferenceProcessor* rp = sh->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;
  ShenandoahPhaseTimings::Phase phase_enqueue = ShenandoahPhaseTimings::weakrefs_enqueue;

  // Temporarily swap in Shenandoah's is-alive closure for the duration of
  // this scope; the mutator restores the previous closure on destruction.
  ReferenceProcessorIsAliveMutator fix_alive(rp, sh->is_alive_closure());

  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Setup collector policy for softref cleaning.
  bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  // Executor used by RP when it chooses the multi-threaded path.
  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(sh->gc_timer(), rp->num_q());

  {
    ShenandoahGCPhase phase(phase_process);

    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
    pt.print_all_references();

    // Handle weak oop roots not managed by the ReferenceProcessor
    // (see NOTE in weak_refs_work() about JNI Weak refs).
    WeakProcessor::weak_oops_do(&is_alive, &keep_alive);

    // Queues must be drained unless the concurrent GC was cancelled mid-phase.
    assert(!_heap->cancelled_concgc() || task_queues()->is_empty(), "Should be empty");
  }

  // On cancellation, skip enqueueing; the degenerated/full GC path takes over.
  if (_heap->cancelled_concgc()) return;

  {
    ShenandoahGCPhase phase(phase_enqueue);
    rp->enqueue_discovered_references(&executor, &pt);
    pt.print_enqueue_phase();
  }
}