src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp

rev 10493 : [Backport] Shenandoah string deduplication
rev 10496 : [backport] Rename "cancel_concgc" to "cancel_gc"
rev 10504 : [backport] Full GC always comes with liveness data
rev 10531 : [backport] Improve scheduling and interleaving of SATB processing in mark loop
rev 10536 : [backport] Process remaining SATB buffers in final mark/traverse loop instead of separate phase
rev 10546 : [backport] Wrap worker id in thread local worker session
rev 10547 : [backport] Non-cancellable mark loops should have sensible stride
rev 10554 : [backport] Cleanup UseShenandoahOWST blocks
rev 10561 : [backport] Add task termination and enhanced task queue state tracking + weakrefs
rev 10574 : [backport] Print task queue statistics at the end of GC cycle
rev 10581 : [backport] Refactor alive-closures to deal better with new marking contexts
rev 10582 : [backport] Avoid indirection to next-mark-context
rev 10589 : [backport] Purge support for ShenandoahConcurrentEvacCodeRoots and ShenandoahBarriersForConst
rev 10613 : [backport] Remove obsolete/unused logging usages
rev 10625 : [backport] Soft refs should be purged reliably on allocation failure, or with compact heuristics


  30 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
  32 #include "gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
  34 #include "gc_implementation/shenandoah/brooksPointer.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
  36 #include "memory/referenceProcessor.hpp"
  37 #include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp"
  38 #include "code/codeCache.hpp"
  39 #include "classfile/symbolTable.hpp"
  40 #include "classfile/systemDictionary.hpp"
  41 #include "memory/iterator.inline.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "utilities/taskqueue.hpp"
  44 
  45 template<UpdateRefsMode UPDATE_REFS>
  46 class ShenandoahInitMarkRootsClosure : public OopClosure {
  47 private:
  48   ShenandoahObjToScanQueue* _queue;
  49   ShenandoahHeap* _heap;

  50 
  51   template <class T>
  52   inline void do_oop_nv(T* p) {
  53     ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS>(p, _heap, _queue);
  54   }
  55 
  56 public:
  57   ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
  58     _queue(q), _heap(ShenandoahHeap::heap()) {};
  59 
  60   void do_oop(narrowOop* p) { do_oop_nv(p); }
  61   void do_oop(oop* p)       { do_oop_nv(p); }
  62 };
  63 
  64 ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  65   MetadataAwareOopClosure(rp),
  66   _queue(q),
  67   _heap((ShenandoahHeap*) Universe::heap())
  68 {
  69 }
  70 
  71 template<UpdateRefsMode UPDATE_REFS>
  72 class ShenandoahInitMarkRootsTask : public AbstractGangTask {
  73 private:
  74   ShenandoahRootProcessor* _rp;
  75   bool _process_refs;
  76 public:
  77   ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
  78     AbstractGangTask("Shenandoah init mark roots task"),
  79     _rp(rp),
  80     _process_refs(process_refs) {
  81   }
  82 
  83   void work(uint worker_id) {
  84     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  85 
  86     ShenandoahHeap* heap = ShenandoahHeap::heap();
  87     ShenandoahObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
  88     assert(queues->get_reserved() > worker_id, err_msg("Queue has not been reserved for worker id: %d", worker_id));
  89 
  90     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  91     ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
  92     CLDToOopClosure cldCl(&mark_cl);
  93     MarkingCodeBlobClosure blobsCl(&mark_cl, ! CodeBlobToOopClosure::FixRelocations);
  94 
  95     // The rationale for selecting the roots to scan is as follows:
  96     //   a. With unload_classes = true, we only want to scan the actual strong roots from the
  97     //      code cache. This will allow us to identify the dead classes, unload them, *and*
   98     //      invalidate the relevant code cache blobs. This can only be done together with
   99     //      class unloading.
  100     //   b. With unload_classes = false, we have to nominally retain all the references from the
  101     //      code cache, because there may be embedded classes/oops in the generated code,
  102     //      which we will never visit during mark. Without code cache invalidation, as in (a),
  103     //      we risk executing that code cache blob, and crashing.
  104     //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
  105     //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
 106     //      pause time.
 107 
 108     ResourceMark m;
 109     if (heap->concurrentMark()->unload_classes()) {
 110       _rp->process_strong_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, NULL, &blobsCl, NULL, worker_id);
 111     } else {
 112       if (ShenandoahConcurrentScanCodeRoots) {
 113         CodeBlobClosure* code_blobs = NULL;
 114 #ifdef ASSERT
 115         ShenandoahAssertToSpaceClosure assert_to_space_oops;
 116         CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
 117         // If conc code cache evac is disabled, code cache should have only to-space ptrs.
 118         // Otherwise, it should have to-space ptrs only if mark does not update refs.
 119         if (!ShenandoahConcurrentEvacCodeRoots && !heap->has_forwarded_objects()) {
 120           code_blobs = &assert_to_space;
 121         }
 122 #endif
 123         _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, NULL, worker_id);
 124       } else {
 125         _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, NULL, worker_id);
 126       }
 127     }
 128   }
 129 };
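
The unload_classes / ShenandoahConcurrentScanCodeRoots branching above reduces to a
three-way choice over how code cache roots are handled during init mark. A minimal
decision sketch (the enum and helper are hypothetical, not part of this patch):

    enum CodeRootsMode { SCAN_STRONG_ONLY, SCAN_ALL_NOW, SCAN_ALL_CONCURRENTLY };

    static CodeRootsMode select_code_roots_mode(bool unload_classes, bool concurrent_scan) {
      if (unload_classes)  return SCAN_STRONG_ONLY;       // (a): find dead classes, invalidate their blobs
      if (concurrent_scan) return SCAN_ALL_CONCURRENTLY;  // (c): move the code cache scan off the pause
      return SCAN_ALL_NOW;                                // (b): keep every code cache reference alive
    }
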
 130 
 131 class ShenandoahUpdateRootsTask : public AbstractGangTask {
 132 private:
 133   ShenandoahRootProcessor* _rp;
 134   const bool _update_code_cache;
 135 public:
 136   ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
 137     AbstractGangTask("Shenandoah update roots task"),
 138     _rp(rp),
 139     _update_code_cache(update_code_cache) {
 140   }
 141 
 142   void work(uint worker_id) {
 143     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

 144 
 145     ShenandoahHeap* heap = ShenandoahHeap::heap();
 146     ShenandoahUpdateRefsClosure cl;
 147     CLDToOopClosure cldCl(&cl);
 148 
 149     CodeBlobClosure* code_blobs;
 150     CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
 151 #ifdef ASSERT
 152     ShenandoahAssertToSpaceClosure assert_to_space_oops;
 153     CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
 154 #endif
 155     if (_update_code_cache) {
 156       code_blobs = &update_blobs;
 157     } else {
 158       code_blobs =
 159         DEBUG_ONLY(&assert_to_space)
 160         NOT_DEBUG(NULL);
 161     }
 162     _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, NULL, worker_id);
 163   }
 164 };
 165 
 166 class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
 167 private:
 168   ShenandoahConcurrentMark* _cm;
 169   ParallelTaskTerminator* _terminator;
 170   bool _update_refs;
 171 
 172 public:
 173   ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
 174     AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
 175   }
 176 
 177 
 178   void work(uint worker_id) {

 179     ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
 180     jushort* live_data = _cm->get_liveness(worker_id);
 181     ReferenceProcessor* rp;
 182     if (_cm->process_references()) {
 183       rp = ShenandoahHeap::heap()->ref_processor();
 184       shenandoah_assert_rp_isalive_installed();
 185     } else {
 186       rp = NULL;
 187     }
 188 
 189     _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
 190     _cm->mark_loop(worker_id, _terminator, rp,
 191                    true, // cancellable
 192                    true, // drain SATBs as we go
 193                    true, // count liveness
 194                    _cm->unload_classes(),
 195                    _update_refs);
 196   }
 197 };
 198 
 199 class ShenandoahFinalMarkingTask : public AbstractGangTask {
 200 private:
 201   ShenandoahConcurrentMark* _cm;
 202   ParallelTaskTerminator* _terminator;
 203   bool _update_refs;
 204   bool _count_live;
 205   bool _unload_classes;

 206 
 207 public:
 208   ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs, bool count_live, bool unload_classes) :
 209     AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs), _count_live(count_live), _unload_classes(unload_classes) {
 210   }
 211 
 212   void work(uint worker_id) {
 213     // First drain remaining SATB buffers.
 214     // Notice that this is not strictly necessary for mark-compact. But since
 215     // it requires a StrongRootsScope around the task, we need to claim the
 216     // threads, and performance-wise it doesn't really matter. Adds about 1ms to
 217     // full-gc.
 218     _cm->drain_satb_buffers(worker_id, true);
 219 
 220     ReferenceProcessor* rp;
 221     if (_cm->process_references()) {
 222       rp = ShenandoahHeap::heap()->ref_processor();
 223       shenandoah_assert_rp_isalive_installed();
 224     } else {
 225       rp = NULL;
 226     }
 227 
 228     // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
 229     // let's check here.
 230     _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
 231     _cm->mark_loop(worker_id, _terminator, rp,
 232                    false, // not cancellable
 233                    false, // do not drain SATBs, already drained
 234                    _count_live,
 235                    _unload_classes,
 236                    _update_refs);

 237 
 238     assert(_cm->task_queues()->is_empty(), "Should be empty");
 239   }
 240 };
 241 
 242 void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
 243   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 244   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 245 
 246   ShenandoahHeap* heap = ShenandoahHeap::heap();
 247 
 248   ShenandoahGCPhase phase(root_phase);
 249 
 250   WorkGang* workers = heap->workers();
 251   uint nworkers = workers->active_workers();
 252 
 253   assert(nworkers <= task_queues()->size(), "Just check");
 254 
 255   ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
 256   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 257   task_queues()->reserve(nworkers);
 258 
 259   if (heap->has_forwarded_objects()) {
 260     ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, process_references());
 261     workers->run_task(&mark_roots);
 262   } else {
 263     // No need to update references, which means the heap is stable.
 264     // Can save time not walking through forwarding pointers.
 265     ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, process_references());
 266     workers->run_task(&mark_roots);
 267   }
 268 
 269   if (ShenandoahConcurrentScanCodeRoots) {
 270     clear_claim_codecache();
 271   }
 272 }
 273 
 274 void ShenandoahConcurrentMark::init_mark_roots() {
 275   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 276   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 277 
 278   mark_roots(ShenandoahPhaseTimings::scan_roots);
 279 }
 280 
 281 void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
 282   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 283 
 284   bool update_code_cache = true; // initialize to safer value
 285   switch (root_phase) {
 286     case ShenandoahPhaseTimings::update_roots:
 287     case ShenandoahPhaseTimings::final_update_refs_roots:
 288       // If code cache was evacuated concurrently, we need to update code cache roots.
 289       update_code_cache = ShenandoahConcurrentEvacCodeRoots;
 290       break;
 291     case ShenandoahPhaseTimings::full_gc_roots:
 292       update_code_cache = true;
 293       break;
 294     default:
 295       ShouldNotReachHere();
 296   }
 297 
 298   ShenandoahHeap* heap = ShenandoahHeap::heap();
 299 
 300   ShenandoahGCPhase phase(root_phase);
 301 
 302   COMPILER2_PRESENT(DerivedPointerTable::clear());
 303 
 304   uint nworkers = heap->workers()->active_workers();
 305 
 306   ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
 307   ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
 308   heap->workers()->run_task(&update_roots);
 309 


 348       }
 349     }
 350   }
 351 }
 352 
 353 void ShenandoahConcurrentMark::mark_from_roots() {
 354   ShenandoahHeap* sh = ShenandoahHeap::heap();
 355   WorkGang* workers = sh->workers();
 356   uint nworkers = workers->active_workers();
 357 
 358   bool update_refs = sh->has_forwarded_objects();
 359 
 360   ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);
 361 
 362   if (process_references()) {
 363     ReferenceProcessor* rp = sh->ref_processor();
 364     rp->set_active_mt_degree(nworkers);
 365 
 366     // enable ("weak") refs discovery
 367     rp->enable_discovery(true /*verify_no_refs*/, true);
 368     rp->setup_policy(sh->is_full_gc_in_progress()); // snapshot the soft ref policy to be used in this cycle
 369   }
 370 
 371   shenandoah_assert_rp_isalive_not_installed();
 372   ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), sh->is_alive_closure());

 373 
 374   task_queues()->reserve(nworkers);
 375 
 376   if (UseShenandoahOWST) {
 377     ShenandoahTaskTerminator terminator(nworkers, task_queues());
 378     ShenandoahConcurrentMarkingTask markingTask = ShenandoahConcurrentMarkingTask(this, &terminator, update_refs);
 379     workers->run_task(&markingTask);
 380   } else {
 381     ParallelTaskTerminator terminator(nworkers, task_queues());
 382     ShenandoahConcurrentMarkingTask markingTask = ShenandoahConcurrentMarkingTask(this, &terminator, update_refs);
 383     workers->run_task(&markingTask);

 384   }
 385 
 386   assert(task_queues()->is_empty() || sh->cancelled_concgc(), "Should be empty when not cancelled");
 387   if (! sh->cancelled_concgc()) {
 388     TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
 389   }
 390 
 391   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 392 }
 393 
 394 void ShenandoahConcurrentMark::finish_mark_from_roots() {
 395   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 396 
 397   ShenandoahHeap* sh = ShenandoahHeap::heap();
 398 
 399   TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
 400 
 401   shared_finish_mark_from_roots(/* full_gc = */ false);
 402 
 403   if (sh->has_forwarded_objects()) {
 404     update_roots(ShenandoahPhaseTimings::update_roots);
 405   }
 406 
 407   TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
 408 }
 409 
 410 void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
 411   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 412 
 413   ShenandoahHeap* sh = ShenandoahHeap::heap();
 414 
 415   uint nworkers = sh->workers()->active_workers();
 416 
 417   // Finally mark everything else we've got in our queues during the previous steps.
 418   // It does two different things for concurrent vs. mark-compact GC:
 419   // - For concurrent GC, it starts with empty task queues, drains the remaining
 420   //   SATB buffers, and then completes the marking closure.
 421   // - For mark-compact GC, it starts out with the task queues seeded by initial
  422   //   root scan, and completes the closure, thus marking through all live objects.
 423   // The implementation is the same, so it's shared here.
 424   {
 425     ShenandoahGCPhase phase(full_gc ?
 426                                ShenandoahPhaseTimings::full_gc_mark_finish_queues :
 427                                ShenandoahPhaseTimings::finish_queues);
 428     bool count_live = !(ShenandoahNoLivenessFullGC && full_gc); // we do not need liveness data for full GC
 429     task_queues()->reserve(nworkers);
 430 
 431     shenandoah_assert_rp_isalive_not_installed();
 432     ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), sh->is_alive_closure());
 433 
 434     SharedHeap::StrongRootsScope scope(sh, true);
 435     if (UseShenandoahOWST) {
 436       ShenandoahTaskTerminator terminator(nworkers, task_queues());
 437       ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(), count_live, unload_classes());

 438       sh->workers()->run_task(&task);
 439     } else {
 440       ParallelTaskTerminator terminator(nworkers, task_queues());
 441       ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(), count_live, unload_classes());

 442       sh->workers()->run_task(&task);
 443     }
 444   }
 445 
 446   assert(task_queues()->is_empty(), "Should be empty");
 447 
 448   // When we're done marking everything, we process weak references.
 449   if (process_references()) {
 450     weak_refs_work(full_gc);
 451   }
 452 
 453   // And finally finish class unloading
 454   if (unload_classes()) {
 455     sh->unload_classes_and_cleanup_tables(full_gc);
 456   }
 457 
 458   assert(task_queues()->is_empty(), "Should be empty");
 459 

 460 }
 461 
 462 class ShenandoahSATBThreadsClosure : public ThreadClosure {
 463   ShenandoahSATBBufferClosure* _satb_cl;
 464   int _thread_parity;
 465 
 466  public:
 467   ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
 468     _satb_cl(satb_cl),
 469     _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}
 470 
 471   void do_thread(Thread* thread) {
 472     if (thread->is_Java_thread()) {
 473       if (thread->claim_oops_do(true, _thread_parity)) {
 474         JavaThread* jt = (JavaThread*)thread;
 475         jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
 476       }
 477     } else if (thread->is_VM_thread()) {
 478       if (thread->claim_oops_do(true, _thread_parity)) {
 479         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 480       }
 481     }
 482   }
 483 };
 484 
 485 void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
 486   ShenandoahObjToScanQueue* q = get_queue(worker_id);
 487   ShenandoahSATBBufferClosure cl(q);
 488 
 489   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 490   while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
 491 
 492   if (remark) {
 493     ShenandoahSATBThreadsClosure tc(&cl);
 494     Threads::threads_do(&tc);
 495   }
 496 }
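
For context on why draining these buffers marks objects: under snapshot-at-the-beginning,
mutators run a pre-write barrier that records the value a reference field held before it
is overwritten, so everything reachable in the snapshot stays reachable to the marker.
A minimal sketch of that barrier, assuming hypothetical stand-ins for the marking flag
and queue (this is not HotSpot's actual barrier code):

    #include <cstddef>
    #include <vector>

    static bool g_marking_active = false;       // stand-in for "concurrent mark running"
    static std::vector<void*> g_satb_buffer;    // stand-in for a per-thread SATB queue

    // Pre-write barrier: record the old value before it becomes unreachable.
    inline void satb_pre_write_barrier(void** field) {
      void* old_val = *field;
      if (g_marking_active && old_val != NULL) {
        g_satb_buffer.push_back(old_val);       // drained later, as in drain_satb_buffers()
      }
    }

    inline void reference_store(void** field, void* new_val) {
      satb_pre_write_barrier(field);            // snapshot-at-the-beginning step
      *field = new_val;                         // the actual store
    }
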
 497 
 498 #if TASKQUEUE_STATS
 499 void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
 500   st->print_raw_cr("GC Task Stats");
 501   st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
 502   st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
 503 }
 504 
 505 void ShenandoahConcurrentMark::print_taskqueue_stats() const {
 506   if (! ShenandoahLogTrace) {
 507     return;
 508   }
 509   ResourceMark rm;
 510   outputStream* st = gclog_or_tty;
 511   print_taskqueue_stats_hdr(st);
 512 
 513   TaskQueueStats totals;
 514   const uint n = _task_queues->size();
 515   for (uint i = 0; i < n; ++i) {
 516     st->print(UINT32_FORMAT_W(3), i);
 517     _task_queues->queue(i)->stats.print(st);
 518     st->cr();
 519     totals += _task_queues->queue(i)->stats;
 520   }
 521   st->print("tot "); totals.print(st); st->cr();
 522   DEBUG_ONLY(totals.verify());
 523 
 524 }
 525 
 526 void ShenandoahConcurrentMark::reset_taskqueue_stats() {
 527   const uint n = task_queues()->size();
 528   for (uint i = 0; i < n; ++i) {
 529     task_queues()->queue(i)->stats.reset();
 530   }
 531 }
 532 #endif // TASKQUEUE_STATS
 533 
 534 // Weak Reference Closures
 535 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
 536   uint _worker_id;
 537   ParallelTaskTerminator* _terminator;
 538   bool _reset_terminator;
 539 
 540 public:
 541   ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
 542     _worker_id(worker_id),
 543     _terminator(t),
 544     _reset_terminator(reset_terminator) {
 545   }
 546 
 547   void do_void() {
 548     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 549 
 550     ShenandoahHeap* sh = ShenandoahHeap::heap();
 551     ShenandoahConcurrentMark* scm = sh->concurrentMark();
 552     assert(scm->process_references(), "why else would we be here?");
 553     ReferenceProcessor* rp = sh->ref_processor();
 554 
 555     shenandoah_assert_rp_isalive_installed();
 556 
 557     scm->mark_loop(_worker_id, _terminator, rp,
 558                    false, // not cancellable
 559                    false, // do not drain SATBs
 560                    true,  // count liveness
 561                    scm->unload_classes(),
 562                    sh->has_forwarded_objects());

 563 
 564     if (_reset_terminator) {
 565       _terminator->reset_for_reuse();
 566     }
 567   }
 568 };
 569 
 570 
 571 class ShenandoahCMKeepAliveClosure : public OopClosure {
 572 private:
 573   ShenandoahObjToScanQueue* _queue;
 574   ShenandoahHeap* _heap;

 575 
 576   template <class T>
 577   inline void do_oop_nv(T* p) {
 578     ShenandoahConcurrentMark::mark_through_ref<T, NONE>(p, _heap, _queue);
 579   }
 580 
 581 public:
 582   ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
 583     _queue(q), _heap(ShenandoahHeap::heap()) {};
 584 
 585   void do_oop(narrowOop* p) { do_oop_nv(p); }
 586   void do_oop(oop* p)       { do_oop_nv(p); }
 587 };
 588 
 589 class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
 590 private:
 591   ShenandoahObjToScanQueue* _queue;
 592   ShenandoahHeap* _heap;

 593 
 594   template <class T>
 595   inline void do_oop_nv(T* p) {
 596     ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE>(p, _heap, _queue);
 597   }
 598 
 599 public:
 600   ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 601     _queue(q), _heap(ShenandoahHeap::heap()) {};
 602 
 603   void do_oop(narrowOop* p) { do_oop_nv(p); }
 604   void do_oop(oop* p)       { do_oop_nv(p); }
 605 };
 606 
 607 class ShenandoahRefProcTaskProxy : public AbstractGangTask {
 608 
 609 private:
 610   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
 611   ParallelTaskTerminator* _terminator;
 612 public:
 613 
 614   ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
 615                              ParallelTaskTerminator* t) :
 616     AbstractGangTask("Process reference objects in parallel"),
 617     _proc_task(proc_task),
 618     _terminator(t) {
 619   }
 620 
 621   void work(uint worker_id) {


 713   rp->verify_no_references_recorded();
 714   assert(!rp->discovery_enabled(), "Post condition");
 715 
 716 }
 717 
 718 void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
 719   ShenandoahHeap* sh = ShenandoahHeap::heap();
 720 
 721   ReferenceProcessor* rp = sh->ref_processor();
 722 
 723   ShenandoahPhaseTimings::Phase phase_process =
 724           full_gc ?
 725           ShenandoahPhaseTimings::full_gc_weakrefs_process :
 726           ShenandoahPhaseTimings::weakrefs_process;
 727 
 728   ShenandoahPhaseTimings::Phase phase_enqueue =
 729           full_gc ?
 730           ShenandoahPhaseTimings::full_gc_weakrefs_enqueue :
 731           ShenandoahPhaseTimings::weakrefs_enqueue;
 732 
 733   shenandoah_assert_rp_isalive_not_installed();
 734   ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

 735 
 736   WorkGang* workers = sh->workers();
 737   uint nworkers = workers->active_workers();
 738 
 739   // Setup collector policy for softref cleaning.
 740   bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg*/);
 741   log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
 742   rp->setup_policy(clear_soft_refs);
 743   rp->set_active_mt_degree(nworkers);
 744 
 745   assert(task_queues()->is_empty(), "Should be empty");
 746 
  747   // complete_gc and keep_alive closures instantiated here are only needed for
  748   // the single-threaded path in RP. They share queue 0 for tracking work, which
  749   // simplifies the implementation. Since RP may decide to call complete_gc several
 750   // times, we need to be able to reuse the terminator.
 751   uint serial_worker_id = 0;
 752   ParallelTaskTerminator terminator(1, task_queues());
 753   ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
 754 
 755   ShenandoahRefProcTaskExecutor executor(workers);
 756 
 757   {
 758     ShenandoahGCPhase phase(phase_process);

 759 
 760     if (sh->has_forwarded_objects()) {
 761       ShenandoahForwardedIsAliveClosure is_alive;
 762       ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
 763       rp->process_discovered_references(&is_alive, &keep_alive,
 764                                         &complete_gc, &executor,
 765                                         NULL, sh->shenandoahPolicy()->tracer()->gc_id());
 766     } else {
 767       ShenandoahIsAliveClosure is_alive;
 768       ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
 769       rp->process_discovered_references(&is_alive, &keep_alive,
 770                                         &complete_gc, &executor,
 771                                         NULL, sh->shenandoahPolicy()->tracer()->gc_id());
 772     }
 773 
 774     assert(task_queues()->is_empty(), "Should be empty");
 775   }
 776 
 777   {
 778     ShenandoahGCPhase phase(phase_enqueue);
 779     rp->enqueue_discovered_references(&executor);
 780   }
 781 }
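
The is_alive / keep_alive / complete_gc triple follows the usual ReferenceProcessor
contract. A simplified sketch of what process_discovered_references does per discovered
reference, with illustrative types (real discovered lists hold oops, and retention
policy is decided per reference kind):

    #include <cstddef>
    #include <vector>

    struct DiscoveredRef {
      void* referent;
      bool  cleared;
    };

    typedef bool (*IsAliveFn)(void*);
    typedef void (*KeepAliveFn)(void*);

    static void process_discovered(std::vector<DiscoveredRef>& refs,
                                   IsAliveFn is_alive, KeepAliveFn keep_alive,
                                   bool policy_retains) {
      for (size_t i = 0; i < refs.size(); i++) {
        DiscoveredRef& r = refs[i];
        if (is_alive(r.referent)) continue;         // referent is marked: drop from list
        if (policy_retains) keep_alive(r.referent); // e.g. soft refs kept this cycle
        else r.cleared = true;                      // cleared now, enqueued in phase_enqueue
      }
    }
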
 782 
 783 class ShenandoahCancelledGCYieldClosure : public YieldClosure {
 784 private:
 785   ShenandoahHeap* const _heap;
 786 public:
 787   ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 788   virtual bool should_return() { return _heap->cancelled_concgc(); }
 789 };
 790 
 791 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
 792 public:
 793   void do_void() {
 794     ShenandoahHeap* sh = ShenandoahHeap::heap();
 795     ShenandoahConcurrentMark* scm = sh->concurrentMark();
 796     assert(scm->process_references(), "why else would we be here?");
 797     ParallelTaskTerminator terminator(1, scm->task_queues());
 798 
 799     ReferenceProcessor* rp = sh->ref_processor();
 800     shenandoah_assert_rp_isalive_installed();
 801 
 802     scm->mark_loop(0, &terminator, rp,
 803                    false, // not cancellable
 804                    true,  // drain SATBs
 805                    true,  // count liveness
 806                    scm->unload_classes(),
 807                    sh->has_forwarded_objects());

 808   }
 809 };
 810 
 811 class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
 812 private:
 813   ShenandoahObjToScanQueue* _queue;
 814   ShenandoahHeap* _heap;

 815 
 816   template <class T>
 817   inline void do_oop_nv(T* p) {
 818     ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT>(p, _heap, _queue);
 819   }
 820 
 821 public:
 822   ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 823     _queue(q), _heap(ShenandoahHeap::heap()) {}
 824 
 825   void do_oop(narrowOop* p) { do_oop_nv(p); }
 826   void do_oop(oop* p)       { do_oop_nv(p); }
 827 };
 828 
 829 void ShenandoahConcurrentMark::preclean_weak_refs() {
  830   // Pre-cleaning weak references before diving into STW makes sense at the
  831   // end of concurrent mark. This will filter out the references whose referents
  832   // are alive. Note that ReferenceProcessor already filters these out during
  833   // reference discovery, where the bulk of the work is done. This phase processes
  834   // leftovers that missed the initial filtering, i.e. when the referent was marked
  835   // alive after the reference was discovered by RP.
 836 
 837   assert(process_references(), "sanity");
 838 
 839   ShenandoahHeap* sh = ShenandoahHeap::heap();
 840   ReferenceProcessor* rp = sh->ref_processor();
 841   ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
 842 
 843   shenandoah_assert_rp_isalive_not_installed();
 844   ReferenceProcessorIsAliveMutator fix_isalive(rp, sh->is_alive_closure());

 845 
 846   // Interrupt on cancelled GC
 847   ShenandoahCancelledGCYieldClosure yield;
 848 
 849   assert(task_queues()->is_empty(), "Should be empty");
 850 
 851   ShenandoahPrecleanCompleteGCClosure complete_gc;
 852   if (sh->has_forwarded_objects()) {
 853     ShenandoahForwardedIsAliveClosure is_alive;
 854     ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(get_queue(0));
 855     ResourceMark rm;
 856     rp->preclean_discovered_references(&is_alive, &keep_alive,
 857                                        &complete_gc, &yield,
 858                                        NULL, sh->shenandoahPolicy()->tracer()->gc_id());
 859   } else {
 860     ShenandoahIsAliveClosure is_alive;
 861     ShenandoahCMKeepAliveClosure keep_alive(get_queue(0));
 862     ResourceMark rm;
 863     rp->preclean_discovered_references(&is_alive, &keep_alive,
 864                                        &complete_gc, &yield,


 871 void ShenandoahConcurrentMark::cancel() {
 872   // Clean up marking stacks.
 873   ShenandoahObjToScanQueueSet* queues = task_queues();
 874   queues->clear();
 875 
 876   // Cancel SATB buffers.
 877   JavaThread::satb_mark_queue_set().abandon_partial_marking();
 878 }
 879 
 880 ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
 881   assert(task_queues()->get_reserved() > worker_id, err_msg("No reserved queue for worker id: %d", worker_id));
 882   return _task_queues->queue(worker_id);
 883 }
 884 
 885 void ShenandoahConcurrentMark::clear_queue(ShenandoahObjToScanQueue *q) {
 886   q->set_empty();
 887   q->overflow_stack()->clear();
 888   q->clear_buffer();
 889 }
 890 
 891 template <bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
 892 void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp, bool class_unload, bool update_refs) {

 893   ShenandoahObjToScanQueue* q = get_queue(w);
 894 
 895   jushort* ld;
 896   if (COUNT_LIVENESS) {
 897     ld = get_liveness(w);
 898     Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));
 899   } else {
 900     ld = NULL;
 901   }
 902 
 903   // TODO: We can clean up this if we figure out how to do templated oop closures that
 904   // play nice with specialized_oop_iterators.
 905   if (class_unload) {
 906     if (update_refs) {
 907       ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
 908       mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
 909     } else {
 910       ShenandoahMarkRefsMetadataClosure cl(q, rp);
 911       mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);

 912     }
 913   } else {
 914     if (update_refs) {
 915       ShenandoahMarkUpdateRefsClosure cl(q, rp);
 916       mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
 917     } else {
 918       ShenandoahMarkRefsClosure cl(q, rp);
 919       mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);

 920     }
 921   }
 922 
 923   if (COUNT_LIVENESS) {
 924     for (uint i = 0; i < _heap->num_regions(); i++) {
 925       ShenandoahHeapRegion* r = _heap->get_region(i);
 926       jushort live = ld[i];
 927       if (live > 0) {
 928         r->increase_live_data_gc_words(live);
 929       }
 930     }
 931   }
 932 }
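
The COUNT_LIVENESS flush above keeps liveness accounting off the hot marking path: each
worker counts live words into a private per-region array and publishes once per region
at the end. A minimal sketch of the same pattern, with illustrative names:

    #include <atomic>
    #include <cstddef>

    static const unsigned NUM_REGIONS = 1024;   // stand-in for heap->num_regions()
    static std::atomic<size_t> g_region_live_words[NUM_REGIONS];

    // One atomic add per touched region, instead of one per marked object.
    static void flush_liveness(const unsigned short* local_live) {
      for (unsigned i = 0; i < NUM_REGIONS; i++) {
        if (local_live[i] > 0) {
          g_region_live_words[i].fetch_add(local_live[i]);
        }
      }
    }
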
 933 
 934 template <class T, bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
 935 void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
 936   int seed = 17;
 937   uintx stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;
 938 
 939   ShenandoahHeap* heap = ShenandoahHeap::heap();
 940   ShenandoahObjToScanQueueSet* queues = task_queues();
 941   ShenandoahObjToScanQueue* q;
 942   ShenandoahMarkTask t;
 943 
 944   /*
 945    * Process outstanding queues, if any.
 946    *
 947    * There can be more queues than workers. To deal with the imbalance, we claim
 948    * extra queues first. Since marking can push new tasks into the queue associated
 949    * with this worker id, we come back to process this queue in the normal loop.
 950    */
 951   assert(queues->get_reserved() == heap->workers()->active_workers(),
 952     "Need to reserve proper number of queues");
 953 
 954   q = queues->claim_next();
 955   while (q != NULL) {
 956     if (CANCELLABLE && heap->cancelled_concgc()) {
 957       ShenandoahCancelledTerminatorTerminator tt;
 958       while (!terminator->offer_termination(&tt));
 959       return;
 960     }
 961 
 962     for (uint i = 0; i < stride; i++) {
 963       if (try_queue(q, t)) {
 964         do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
 965       } else {
 966         assert(q->is_empty(), "Must be empty");
 967         q = queues->claim_next();
 968         break;
 969       }
 970     }
 971   }
 972 
 973   q = get_queue(worker_id);
 974 
 975   /*
 976    * Normal marking loop:
 977    */
 978   while (true) {
 979     if (CANCELLABLE && heap->cancelled_concgc()) {
 980       ShenandoahCancelledTerminatorTerminator tt;
 981       while (!terminator->offer_termination(&tt));
 982       return;
 983     }
 984 
 985     for (uint i = 0; i < stride; i++) {
 986       if (try_queue(q, t) ||
 987               (DRAIN_SATB && try_draining_satb_buffer(q, t)) ||
 988               queues->steal(worker_id, &seed, t)) {
 989         do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);

 990       } else {
 991         if (terminator->offer_termination()) return;
 992       }
 993     }
 994   }
 995 }
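
The loop above is the standard work-stealing shape: drain the worker's own queue (and,
when requested, the SATB buffers), then try to steal, and only offer termination when
both fail. A single-threaded sketch of that shape with illustrative names (the real
queues are lock-free and steal victims are picked pseudo-randomly via the seed):

    #include <cstddef>
    #include <deque>
    #include <vector>

    struct Task { void* obj; };

    static bool try_own(std::deque<Task>& q, Task& t) {
      if (q.empty()) return false;
      t = q.back(); q.pop_back();                // LIFO end: cache-warm tasks
      return true;
    }

    static bool try_steal(std::vector<std::deque<Task> >& all, size_t self, Task& t) {
      for (size_t i = 0; i < all.size(); i++) {
        if (i == self || all[i].empty()) continue;
        t = all[i].front(); all[i].pop_front();  // steal from the opposite end
        return true;
      }
      return false;
    }
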
 996 
 997 bool ShenandoahConcurrentMark::process_references() const {
 998   return _heap->process_references();
 999 }
1000 
1001 bool ShenandoahConcurrentMark::unload_classes() const {
1002   return _heap->unload_classes();
1003 }
1004 
1005 bool ShenandoahConcurrentMark::claim_codecache() {
1006   assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
1007   return _claimed_codecache.try_set();
1008 }
1009 
1010 void ShenandoahConcurrentMark::clear_claim_codecache() {
1011   assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
1012   _claimed_codecache.unset();

src/share/vm/gc_implementation/shenandoah/shenandoahConcurrentMark.cpp (new)

  30 #include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
  31 #include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
  32 #include "gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp"
  33 #include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
  34 #include "gc_implementation/shenandoah/brooksPointer.hpp"
  35 #include "gc_implementation/shenandoah/shenandoahUtils.hpp"
  36 #include "memory/referenceProcessor.hpp"
  37 #include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp"
  38 #include "code/codeCache.hpp"
  39 #include "classfile/symbolTable.hpp"
  40 #include "classfile/systemDictionary.hpp"
  41 #include "memory/iterator.inline.hpp"
  42 #include "oops/oop.inline.hpp"
  43 #include "utilities/taskqueue.hpp"
  44 
  45 template<UpdateRefsMode UPDATE_REFS>
  46 class ShenandoahInitMarkRootsClosure : public OopClosure {
  47 private:
  48   ShenandoahObjToScanQueue* _queue;
  49   ShenandoahHeap* _heap;
  50   ShenandoahMarkingContext* const _mark_context;
  51 
  52   template <class T>
  53   inline void do_oop_nv(T* p) {
  54     ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, false /* string dedup */>(p, _heap, _queue, _mark_context);
  55   }
  56 
  57 public:
  58   ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
  59     _queue(q),
  60     _heap(ShenandoahHeap::heap()),
  61     _mark_context(_heap->next_marking_context()) {};
  62 
  63   void do_oop(narrowOop* p) { do_oop_nv(p); }
  64   void do_oop(oop* p)       { do_oop_nv(p); }
  65 };
  66 
  67 ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  68   MetadataAwareOopClosure(rp),
  69   _queue(q),
  70   _dedup_queue(NULL),
  71   _heap(ShenandoahHeap::heap()),
  72   _mark_context(_heap->next_marking_context())
  73 { }
  74 
  75 
  76 ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) :
  77   MetadataAwareOopClosure(rp),
  78   _queue(q),
  79   _dedup_queue(dq),
  80   _heap(ShenandoahHeap::heap()),
  81   _mark_context(_heap->next_marking_context())
  82 { }
  83 
  84 
  85 template<UpdateRefsMode UPDATE_REFS>
  86 class ShenandoahInitMarkRootsTask : public AbstractGangTask {
  87 private:
  88   ShenandoahRootProcessor* _rp;
  89   bool _process_refs;
  90 public:
  91   ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
  92     AbstractGangTask("Shenandoah init mark roots task"),
  93     _rp(rp),
  94     _process_refs(process_refs) {
  95   }
  96 
  97   void work(uint worker_id) {
  98     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  99     ShenandoahWorkerSession worker_session(worker_id);
 100 
 101     ShenandoahHeap* heap = ShenandoahHeap::heap();
 102     ShenandoahObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
 103     assert(queues->get_reserved() > worker_id, err_msg("Queue has not been reserved for worker id: %d", worker_id));
 104 
 105     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 106     ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
 107     CLDToOopClosure cldCl(&mark_cl);
 108     MarkingCodeBlobClosure blobsCl(&mark_cl, ! CodeBlobToOopClosure::FixRelocations);
 109 
 110     // The rationale for selecting the roots to scan is as follows:
 111     //   a. With unload_classes = true, we only want to scan the actual strong roots from the
 112     //      code cache. This will allow us to identify the dead classes, unload them, *and*
  113     //      invalidate the relevant code cache blobs. This can only be done together with
  114     //      class unloading.
  115     //   b. With unload_classes = false, we have to nominally retain all the references from the
  116     //      code cache, because there may be embedded classes/oops in the generated code,
  117     //      which we will never visit during mark. Without code cache invalidation, as in (a),
  118     //      we risk executing that code cache blob, and crashing.
  119     //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
  120     //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
 121     //      pause time.
 122 
 123     ResourceMark m;
 124     if (heap->concurrentMark()->unload_classes()) {
 125       _rp->process_strong_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, NULL, &blobsCl, NULL, worker_id);
 126     } else {
 127       if (ShenandoahConcurrentScanCodeRoots) {
 128         CodeBlobClosure* code_blobs = NULL;
 129 #ifdef ASSERT
 130         ShenandoahAssertToSpaceClosure assert_to_space_oops;
 131         CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
  132         // With concurrent code cache evacuation purged, the code cache should
  133         // have only to-space ptrs whenever mark does not update refs.
 134         if (!heap->has_forwarded_objects()) {
 135           code_blobs = &assert_to_space;
 136         }
 137 #endif
 138         _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, NULL, worker_id);
 139       } else {
 140         _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, NULL, worker_id);
 141       }
 142     }
 143   }
 144 };
 145 
 146 class ShenandoahUpdateRootsTask : public AbstractGangTask {
 147 private:
 148   ShenandoahRootProcessor* _rp;
 149   const bool _update_code_cache;
 150 public:
 151   ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
 152     AbstractGangTask("Shenandoah update roots task"),
 153     _rp(rp),
 154     _update_code_cache(update_code_cache) {
 155   }
 156 
 157   void work(uint worker_id) {
 158     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 159     ShenandoahWorkerSession worker_session(worker_id);
 160 
 161     ShenandoahHeap* heap = ShenandoahHeap::heap();
 162     ShenandoahUpdateRefsClosure cl;
 163     CLDToOopClosure cldCl(&cl);
 164 
 165     CodeBlobClosure* code_blobs;
 166     CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
 167 #ifdef ASSERT
 168     ShenandoahAssertToSpaceClosure assert_to_space_oops;
 169     CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
 170 #endif
 171     if (_update_code_cache) {
 172       code_blobs = &update_blobs;
 173     } else {
 174       code_blobs =
 175         DEBUG_ONLY(&assert_to_space)
 176         NOT_DEBUG(NULL);
 177     }
 178     _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, NULL, worker_id);
 179   }
 180 };
 181 
 182 class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
 183 private:
 184   ShenandoahConcurrentMark* _cm;
 185   ParallelTaskTerminator* _terminator;
 186   bool _update_refs;
 187 
 188 public:
 189   ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
 190     AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
 191   }
 192 
 193 
 194   void work(uint worker_id) {
 195     ShenandoahWorkerSession worker_session(worker_id);
 196     ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
 197     jushort* live_data = _cm->get_liveness(worker_id);
 198     ReferenceProcessor* rp;
 199     if (_cm->process_references()) {
 200       rp = ShenandoahHeap::heap()->ref_processor();
 201       shenandoah_assert_rp_isalive_installed();
 202     } else {
 203       rp = NULL;
 204     }
 205 
 206     _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
 207     _cm->mark_loop(worker_id, _terminator, rp,
 208                    true, // cancellable
 209                    _cm->unload_classes(),
 210                    _update_refs,
 211                    ShenandoahStringDedup::is_enabled()); // perform string dedup
 212   }
 213 };
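
The new dedup_string argument wires rev 10493 into the mark loop: when string
deduplication is enabled, marking also enqueues candidate Strings for a deduplication
thread that canonicalizes their backing arrays. A toy sketch of that split, with
illustrative types (this is not Shenandoah's actual dedup table or queue):

    #include <cstddef>
    #include <map>
    #include <memory>
    #include <string>
    #include <vector>

    // Stand-in for a Java String: header plus a shared value array.
    struct JString { std::shared_ptr<const std::string> value; };

    static std::map<std::string, std::shared_ptr<const std::string> > g_table;
    static std::vector<JString*> g_dedup_queue;

    // Mark-time hook: candidates are only enqueued; marking never blocks on dedup.
    static void enqueue_for_dedup(JString* s) { g_dedup_queue.push_back(s); }

    // Dedup-thread side: equal strings end up sharing one canonical value array.
    static void dedup_step() {
      for (size_t i = 0; i < g_dedup_queue.size(); i++) {
        JString* s = g_dedup_queue[i];
        std::shared_ptr<const std::string>& canon = g_table[*s->value];
        if (!canon) canon = s->value;            // first occurrence becomes canonical
        else        s->value = canon;            // duplicate shares the canonical array
      }
      g_dedup_queue.clear();
    }
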
 214 
 215 class ShenandoahSATBThreadsClosure : public ThreadClosure {
 216   ShenandoahSATBBufferClosure* _satb_cl;
 217   int _thread_parity;
 218 
 219  public:
 220   ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
 221     _satb_cl(satb_cl),
 222     _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}
 223 
 224   void do_thread(Thread* thread) {
 225     if (thread->is_Java_thread()) {
 226       if (thread->claim_oops_do(true, _thread_parity)) {
 227         JavaThread* jt = (JavaThread*)thread;
 228         jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
 229       }
 230     } else if (thread->is_VM_thread()) {
 231       if (thread->claim_oops_do(true, _thread_parity)) {
 232         JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 233       }
 234     }
 235   }
 236 };
 237 
 238 class ShenandoahFinalMarkingTask : public AbstractGangTask {
 239 private:
 240   ShenandoahConcurrentMark* _cm;
 241   ParallelTaskTerminator* _terminator;
 242   bool _update_refs;

 243   bool _unload_classes;
 244   bool _dedup_string;
 245 
 246 public:
 247   ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator,
 248                              bool update_refs, bool unload_classes, bool dedup_string) :
 249     AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator),
 250     _update_refs(update_refs), _unload_classes(unload_classes), _dedup_string(dedup_string) {
 251   }
 252 
 253   void work(uint worker_id) {
 254     // First drain remaining SATB buffers.
 255     // Notice that this is not strictly necessary for mark-compact. But since
 256     // it requires a StrongRootsScope around the task, we need to claim the
 257     // threads, and performance-wise it doesn't really matter. Adds about 1ms to
 258     // full-gc.
 259     {
 260       ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
 261       ShenandoahSATBBufferClosure cl(q);
 262       SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 263       while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
 264       ShenandoahSATBThreadsClosure tc(&cl);
 265       Threads::threads_do(&tc);
 266     }
 267 
 268     ReferenceProcessor* rp;
 269     if (_cm->process_references()) {
 270       rp = ShenandoahHeap::heap()->ref_processor();
 271       shenandoah_assert_rp_isalive_installed();
 272     } else {
 273       rp = NULL;
 274     }
 275 
 276     // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
 277     // let's check here.
 278     _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
 279     _cm->mark_loop(worker_id, _terminator, rp,
 280                    false, // not cancellable
 281                    _unload_classes,
 282                    _update_refs,
 283                    _dedup_string);
 284 
 285     assert(_cm->task_queues()->is_empty(), "Should be empty");
 286   }
 287 };
 288 
 289 void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
 290   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 291   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 292 
 293   ShenandoahHeap* heap = ShenandoahHeap::heap();
 294 
 295   ShenandoahGCPhase phase(root_phase);
 296 
 297   WorkGang* workers = heap->workers();
 298   uint nworkers = workers->active_workers();
 299 
 300   assert(nworkers <= task_queues()->size(), "Just check");
 301 
 302   ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
 303   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
 304   task_queues()->reserve(nworkers);
 305 
 306   if (heap->has_forwarded_objects()) {
 307     ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, process_references());
 308     workers->run_task(&mark_roots);
 309   } else {
 310     // No need to update references, which means the heap is stable.
 311     // Can save time not walking through forwarding pointers.
 312     ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, process_references());
 313     workers->run_task(&mark_roots);
 314   }
 315 
 316   if (ShenandoahConcurrentScanCodeRoots) {
 317     clear_claim_codecache();
 318   }
 319 }
 320 
 321 void ShenandoahConcurrentMark::init_mark_roots() {
 322   assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
 323   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 324 
 325   mark_roots(ShenandoahPhaseTimings::scan_roots);
 326 }
 327 
 328 void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
 329   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 330 
 331   bool update_code_cache = true; // initialize to safer value
 332   switch (root_phase) {
 333     case ShenandoahPhaseTimings::update_roots:
 334     case ShenandoahPhaseTimings::final_update_refs_roots:
 335       update_code_cache = false;

 336       break;
 337     case ShenandoahPhaseTimings::full_gc_roots:
 338       update_code_cache = true;
 339       break;
 340     default:
 341       ShouldNotReachHere();
 342   }
 343 
 344   ShenandoahHeap* heap = ShenandoahHeap::heap();
 345 
 346   ShenandoahGCPhase phase(root_phase);
 347 
 348   COMPILER2_PRESENT(DerivedPointerTable::clear());
 349 
 350   uint nworkers = heap->workers()->active_workers();
 351 
 352   ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
 353   ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
 354   heap->workers()->run_task(&update_roots);
 355 


 394       }
 395     }
 396   }
 397 }
 398 
 399 void ShenandoahConcurrentMark::mark_from_roots() {
 400   ShenandoahHeap* sh = ShenandoahHeap::heap();
 401   WorkGang* workers = sh->workers();
 402   uint nworkers = workers->active_workers();
 403 
 404   bool update_refs = sh->has_forwarded_objects();
 405 
 406   ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);
 407 
 408   if (process_references()) {
 409     ReferenceProcessor* rp = sh->ref_processor();
 410     rp->set_active_mt_degree(nworkers);
 411 
 412     // enable ("weak") refs discovery
 413     rp->enable_discovery(true /*verify_no_refs*/, true);
 414     rp->setup_policy(sh->collector_policy()->should_clear_all_soft_refs());
 415   }
 416 
 417   shenandoah_assert_rp_isalive_not_installed();
 418   ShenandoahIsAliveSelector is_alive;
 419   ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), is_alive.is_alive_closure());
 420 
 421   task_queues()->reserve(nworkers);
 422 
 423   {
 424     ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
 425     if (UseShenandoahOWST) {
 426       ShenandoahTaskTerminator terminator(nworkers, task_queues());
 427       ShenandoahConcurrentMarkingTask task(this, &terminator, update_refs);
 428       workers->run_task(&task);
 429     } else {
 430       ParallelTaskTerminator terminator(nworkers, task_queues());
 431       ShenandoahConcurrentMarkingTask task(this, &terminator, update_refs);
 432       workers->run_task(&task);
 433     }
 434   }
 435 
 436   assert(task_queues()->is_empty() || sh->cancelled_gc(), "Should be empty when not cancelled");
 437   if (!sh->cancelled_gc()) {
 438     TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
 439   }
 440 
 441   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
 442 }
 443 
 444 void ShenandoahConcurrentMark::finish_mark_from_roots() {
 445   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 446 
 447   ShenandoahHeap* sh = ShenandoahHeap::heap();
 448 
 449   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
 450 
 451   shared_finish_mark_from_roots(/* full_gc = */ false);
 452 
 453   if (sh->has_forwarded_objects()) {
 454     update_roots(ShenandoahPhaseTimings::update_roots);
 455   }
 456 
 457   TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
 458 }
 459 
 460 void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
 461   assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 462 
 463   ShenandoahHeap* sh = ShenandoahHeap::heap();
 464 
 465   uint nworkers = sh->workers()->active_workers();
 466 
 467   // Finally mark everything else we've got in our queues during the previous steps.
 468   // It does two different things for concurrent vs. mark-compact GC:
 469   // - For concurrent GC, it starts with empty task queues, drains the remaining
 470   //   SATB buffers, and then completes the marking closure.
 471   // - For mark-compact GC, it starts out with the task queues seeded by initial
  472   //   root scan, and completes the closure, thus marking through all live objects.
 473   // The implementation is the same, so it's shared here.
 474   {
 475     ShenandoahGCPhase phase(full_gc ?
 476                             ShenandoahPhaseTimings::full_gc_mark_finish_queues :
 477                             ShenandoahPhaseTimings::finish_queues);

 478     task_queues()->reserve(nworkers);
 479 
 480     shenandoah_assert_rp_isalive_not_installed();
 481     ShenandoahIsAliveSelector is_alive;
 482     ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), is_alive.is_alive_closure());
 483 
 484     ShenandoahTerminationTracker termination_tracker(full_gc ?
 485                                                      ShenandoahPhaseTimings::full_gc_mark_termination :
 486                                                      ShenandoahPhaseTimings::termination);
 487 
 488     SharedHeap::StrongRootsScope scope(sh, true);
 489     if (UseShenandoahOWST) {
 490       ShenandoahTaskTerminator terminator(nworkers, task_queues());
 491       ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(),
 492         unload_classes(), full_gc && ShenandoahStringDedup::is_enabled());
 493       sh->workers()->run_task(&task);
 494     } else {
 495       ParallelTaskTerminator terminator(nworkers, task_queues());
 496       ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(),
 497         unload_classes(), full_gc && ShenandoahStringDedup::is_enabled());
 498       sh->workers()->run_task(&task);
 499     }
 500   }
 501 
 502   assert(task_queues()->is_empty(), "Should be empty");
 503 
 504   // When we're done marking everything, we process weak references.
 505   if (process_references()) {
 506     weak_refs_work(full_gc);
 507   }
 508 
 509   // And finally finish class unloading
 510   if (unload_classes()) {
 511     sh->unload_classes_and_cleanup_tables(full_gc);
 512   }
 513 
 514   assert(task_queues()->is_empty(), "Should be empty");
 515   TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
 516   TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
 517 }
 518 
 519 // Weak Reference Closures
 520 class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
 521   uint _worker_id;
 522   ParallelTaskTerminator* _terminator;
 523   bool _reset_terminator;
 524 
 525 public:
 526   ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
 527     _worker_id(worker_id),
 528     _terminator(t),
 529     _reset_terminator(reset_terminator) {
 530   }
 531 
 532   void do_void() {
 533     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 534 
 535     ShenandoahHeap* sh = ShenandoahHeap::heap();
 536     ShenandoahConcurrentMark* scm = sh->concurrentMark();
 537     assert(scm->process_references(), "why else would we be here?");
 538     ReferenceProcessor* rp = sh->ref_processor();
 539 
 540     shenandoah_assert_rp_isalive_installed();
 541 
 542     scm->mark_loop(_worker_id, _terminator, rp,
 543                    false, // not cancellable
 544                    scm->unload_classes(),
 545                    sh->has_forwarded_objects(),
 546                    false);  // do not do strdedup
 547 
 548     if (_reset_terminator) {
 549       _terminator->reset_for_reuse();
 550     }
 551   }
 552 };
 553 
 554 
 555 class ShenandoahCMKeepAliveClosure : public OopClosure {
 556 private:
 557   ShenandoahObjToScanQueue* _queue;
 558   ShenandoahHeap* _heap;
 559   ShenandoahMarkingContext* const _mark_context;
 560 
 561   template <class T>
 562   inline void do_oop_nv(T* p) {
 563     ShenandoahConcurrentMark::mark_through_ref<T, NONE, false /* string dedup */>(p, _heap, _queue, _mark_context);
 564   }
 565 
 566 public:
 567   ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
 568     _queue(q),
 569     _heap(ShenandoahHeap::heap()),
 570     _mark_context(_heap->next_marking_context()) {}
 571 
 572   void do_oop(narrowOop* p) { do_oop_nv(p); }
 573   void do_oop(oop* p)       { do_oop_nv(p); }
 574 };
 575 
 576 class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
 577 private:
 578   ShenandoahObjToScanQueue* _queue;
 579   ShenandoahHeap* _heap;
 580   ShenandoahMarkingContext* const _mark_context;
 581 
 582   template <class T>
 583   inline void do_oop_nv(T* p) {
 584     ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, false /* string dedup */>(p, _heap, _queue, _mark_context);
 585   }
 586 
 587 public:
 588   ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 589     _queue(q),
 590     _heap(ShenandoahHeap::heap()),
 591     _mark_context(_heap->next_marking_context()) {}
 592 
 593   void do_oop(narrowOop* p) { do_oop_nv(p); }
 594   void do_oop(oop* p)       { do_oop_nv(p); }
 595 };
 596 
 597 class ShenandoahRefProcTaskProxy : public AbstractGangTask {
 598 
 599 private:
 600   AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
 601   ParallelTaskTerminator* _terminator;
 602 public:
 603 
 604   ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
 605                              ParallelTaskTerminator* t) :
 606     AbstractGangTask("Process reference objects in parallel"),
 607     _proc_task(proc_task),
 608     _terminator(t) {
 609   }
 610 
 611   void work(uint worker_id) {
 703   rp->verify_no_references_recorded();
 704   assert(!rp->discovery_enabled(), "Post condition");
 705 
 706 }
 707 
 708 void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
 709   ShenandoahHeap* sh = ShenandoahHeap::heap();
 710 
 711   ReferenceProcessor* rp = sh->ref_processor();
 712 
 713   ShenandoahPhaseTimings::Phase phase_process =
 714           full_gc ?
 715           ShenandoahPhaseTimings::full_gc_weakrefs_process :
 716           ShenandoahPhaseTimings::weakrefs_process;
 717 
 718   ShenandoahPhaseTimings::Phase phase_enqueue =
 719           full_gc ?
 720           ShenandoahPhaseTimings::full_gc_weakrefs_enqueue :
 721           ShenandoahPhaseTimings::weakrefs_enqueue;
 722 
 723   ShenandoahPhaseTimings::Phase phase_process_termination =
 724           full_gc ?
 725           ShenandoahPhaseTimings::full_gc_weakrefs_termination :
 726           ShenandoahPhaseTimings::weakrefs_termination;
 727 
 728   shenandoah_assert_rp_isalive_not_installed();
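       // ReferenceProcessorIsAliveMutator installs the selected is-alive closure
       // into RP for the duration of this scope, and restores the previous one
       // when it goes out of scope.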
 729   ShenandoahIsAliveSelector is_alive;
 730   ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
 731 
 732   WorkGang* workers = sh->workers();
 733   uint nworkers = workers->active_workers();
 734 
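       // setup_policy() picks the soft reference clearing policy for this cycle:
       // when should_clear_all_soft_refs() is set (e.g. on allocation failure,
       // or with compact heuristics), soft references are cleared eagerly instead
       // of being retained by the default LRU-based policy.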
 735   rp->setup_policy(sh->collector_policy()->should_clear_all_soft_refs());
 736   rp->set_active_mt_degree(nworkers);
 737 
 738   assert(task_queues()->is_empty(), "Should be empty");
 739 
 740   // The complete_gc and keep_alive closures instantiated here are only needed for
 741   // the single-threaded path in RP. They share queue 0 for tracking work, which
 742   // simplifies implementation. Since RP may decide to call complete_gc several
 743   // times, we need to be able to reuse the terminator.
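       // For illustration only (this is not the actual RP code), the serial path
       // effectively does:
       //   keep_alive->do_oop(referent_addr); // mark and enqueue the referent
       //   complete_gc->do_void();            // drain queue 0 to a fixpoint
       //   ...possibly more rounds...         // hence reset_for_reuse() above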
 744   uint serial_worker_id = 0;
 745   ParallelTaskTerminator terminator(1, task_queues());
 746   ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
 747 
 748   ShenandoahRefProcTaskExecutor executor(workers);
 749 
 750   {
 751     ShenandoahGCPhase phase(phase_process);
 752     ShenandoahTerminationTracker phase_term(phase_process_termination);
 753 
 754     if (sh->has_forwarded_objects()) {
 755       ShenandoahForwardedIsAliveClosure is_alive;
 756       ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
 757       rp->process_discovered_references(&is_alive, &keep_alive,
 758                                         &complete_gc, &executor,
 759                                         NULL, sh->shenandoahPolicy()->tracer()->gc_id());
 760     } else {
 761       ShenandoahIsAliveClosure is_alive;
 762       ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
 763       rp->process_discovered_references(&is_alive, &keep_alive,
 764                                         &complete_gc, &executor,
 765                                         NULL, sh->shenandoahPolicy()->tracer()->gc_id());
 766     }
 767 
 768     assert(task_queues()->is_empty(), "Should be empty");
 769   }
 770 
 771   {
 772     ShenandoahGCPhase phase(phase_enqueue);
 773     rp->enqueue_discovered_references(&executor);
 774   }
 775 }
 776 
 777 class ShenandoahCancelledGCYieldClosure : public YieldClosure {
 778 private:
 779   ShenandoahHeap* const _heap;
 780 public:
 781   ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
 782   virtual bool should_return() { return _heap->cancelled_gc(); }
 783 };
 784 
 785 class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
 786 public:
 787   void do_void() {
 788     ShenandoahHeap* sh = ShenandoahHeap::heap();
 789     ShenandoahConcurrentMark* scm = sh->concurrentMark();
 790     assert(scm->process_references(), "why else would we be here?");
 791     ParallelTaskTerminator terminator(1, scm->task_queues());
 792 
 793     ReferenceProcessor* rp = sh->ref_processor();
 794     shenandoah_assert_rp_isalive_installed();
 795 
 796     scm->mark_loop(0, &terminator, rp,
 797                    false, // not cancellable
 798                    scm->unload_classes(),
 799                    sh->has_forwarded_objects(),
 800                    false); // do not do strdedup
 801   }
 802 };
 803 
 804 class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
 805 private:
 806   ShenandoahObjToScanQueue* _queue;
 807   ShenandoahHeap* _heap;
 808   ShenandoahMarkingContext* const _mark_context;
 809 
 810   template <class T>
 811   inline void do_oop_nv(T* p) {
 812     ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, false /* string dedup */>(p, _heap, _queue, _mark_context);
 813   }
 814 
 815 public:
 816   ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
 817     _queue(q),
 818     _heap(ShenandoahHeap::heap()),
 819     _mark_context(_heap->next_marking_context()) {}
 820 
 821   void do_oop(narrowOop* p) { do_oop_nv(p); }
 822   void do_oop(oop* p)       { do_oop_nv(p); }
 823 };
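     // The three keep-alive closures above differ only in their UpdateRefsMode:
     // NONE leaves the reference slot untouched (no forwarded objects exist),
     // SIMPLE updates the slot with a plain store (we are at a safepoint), and
     // CONCURRENT presumably has to tolerate racing mutators, which is why it is
     // the variant used from concurrent precleaning.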
 824 
 825 void ShenandoahConcurrentMark::preclean_weak_refs() {
 826   // Pre-cleaning weak references before diving into STW makes sense at the
 827   // end of concurrent mark. This filters out references whose referents are
 828   // already alive. Note that ReferenceProcessor already filters these out on
 829   // reference discovery, and the bulk of the work is done there. This phase
 830   // processes the leftovers that missed the initial filtering, i.e. when the
 831   // referent was marked alive after the reference was discovered by RP.
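       // Schematically (illustrative pseudo-code, not the actual RP implementation):
       //   for each discovered Reference r:
       //     if (is_alive(r.referent)) { keep_alive(r.referent); drop r; }
       //     else                      { retain r for the final-mark pause; }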
 832 
 833   assert(process_references(), "sanity");
 834 
 835   ShenandoahHeap* sh = ShenandoahHeap::heap();
 836   ReferenceProcessor* rp = sh->ref_processor();
 837   ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
 838 
 839   shenandoah_assert_rp_isalive_not_installed();
 840   ShenandoahIsAliveSelector is_alive;
 841   ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
 842 
 843   // Interrupt on cancelled GC
 844   ShenandoahCancelledGCYieldClosure yield;
 845 
 846   assert(task_queues()->is_empty(), "Should be empty");
 847 
 848   ShenandoahPrecleanCompleteGCClosure complete_gc;
 849   if (sh->has_forwarded_objects()) {
 850     ShenandoahForwardedIsAliveClosure is_alive;
 851     ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(get_queue(0));
 852     ResourceMark rm;
 853     rp->preclean_discovered_references(&is_alive, &keep_alive,
 854                                        &complete_gc, &yield,
 855                                        NULL, sh->shenandoahPolicy()->tracer()->gc_id());
 856   } else {
 857     ShenandoahIsAliveClosure is_alive;
 858     ShenandoahCMKeepAliveClosure keep_alive(get_queue(0));
 859     ResourceMark rm;
 860     rp->preclean_discovered_references(&is_alive, &keep_alive,
 861                                        &complete_gc, &yield,
 868 void ShenandoahConcurrentMark::cancel() {
 869   // Clean up marking stacks.
 870   ShenandoahObjToScanQueueSet* queues = task_queues();
 871   queues->clear();
 872 
 873   // Cancel SATB buffers.
 874   JavaThread::satb_mark_queue_set().abandon_partial_marking();
 875 }
 876 
 877 ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
 878   assert(task_queues()->get_reserved() > worker_id, err_msg("No reserved queue for worker id: %d", worker_id));
 879   return _task_queues->queue(worker_id);
 880 }
 881 
 882 void ShenandoahConcurrentMark::clear_queue(ShenandoahObjToScanQueue *q) {
 883   q->set_empty();
 884   q->overflow_stack()->clear();
 885   q->clear_buffer();
 886 }
 887 
 888 template <bool CANCELLABLE>
 889 void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp,
 890                                                  bool class_unload, bool update_refs, bool strdedup) {
 891   ShenandoahObjToScanQueue* q = get_queue(w);
 892 
 893   jushort* ld = get_liveness(w);
 894   Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));
 895 
 896   // TODO: We can clean this up if we figure out how to do templated oop closures that
 897   // play nice with specialized_oop_iterators.
 898   if (class_unload) {
 899     if (update_refs) {
 900       if (strdedup) {
 901         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 902         ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
 903         mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
 904       } else {
 905         ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
 906         mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
 907       }
 908     } else {
 909       if (strdedup) {
 910         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 911         ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
 912         mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
 913       } else {
 914         ShenandoahMarkRefsMetadataClosure cl(q, rp);
 915         mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
 916       }
 917     }
 918   } else {
 919     if (update_refs) {
 920       if (strdedup) {
 921         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 922         ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
 923         mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
 924       } else {
 925         ShenandoahMarkUpdateRefsClosure cl(q, rp);
 926         mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
 927       }
 928     } else {
 929       if (strdedup) {
 930         ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
 931         ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
 932         mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
 933       } else {
 934         ShenandoahMarkRefsClosure cl(q, rp);
 935         mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
 936       }
 937     }
 938   }
 939 
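       // Flush the per-worker liveness cache into the region counters. Entries
       // are jushort-sized (in heap words) to keep the cache compact; do_task
       // presumably flushes an entry straight to its region before the counter
       // would overflow.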
 940   for (uint i = 0; i < _heap->num_regions(); i++) {
 941     ShenandoahHeapRegion* r = _heap->get_region(i);
 942     jushort live = ld[i];
 943     if (live > 0) {
 944       r->increase_live_data_gc_words(live);
 945     }
 946   }
 947 }
 948 
 949 template <class T, bool CANCELLABLE>
 950 void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
 951   int seed = 17;
 952   uintx stride = ShenandoahMarkLoopStride;
 953 
 954   ShenandoahHeap* heap = ShenandoahHeap::heap();
 955   ShenandoahObjToScanQueueSet* queues = task_queues();
 956   ShenandoahObjToScanQueue* q;
 957   ShenandoahMarkTask t;
 958 
 959   /*
 960    * Process outstanding queues, if any.
 961    *
 962    * There can be more queues than workers. To deal with the imbalance, we claim
 963    * extra queues first. Since marking can push new tasks into the queue associated
 964    * with this worker id, we come back to process this queue in the normal loop.
 965    */
 966   assert(queues->get_reserved() == heap->workers()->active_workers(),
 967     "Need to reserve proper number of queues");
 968 
 969   q = queues->claim_next();
 970   while (q != NULL) {
 971     if (CANCELLABLE && heap->cancelled_gc()) {
 972       ShenandoahCancelledTerminatorTerminator tt;
 973       while (!terminator->offer_termination(&tt));
 974       return;
 975     }
 976 
 977     for (uint i = 0; i < stride; i++) {
 978       if (try_queue(q, t)) {
 979         do_task<T>(q, cl, live_data, &t);
 980       } else {
 981         assert(q->is_empty(), "Must be empty");
 982         q = queues->claim_next();
 983         break;
 984       }
 985     }
 986   }
 987 
 988   q = get_queue(worker_id);
 989 
 990   ShenandoahSATBBufferClosure drain_satb(q);
 991   SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
 992 
 993   /*
 994    * Normal marking loop:
 995    */
 996   while (true) {
 997     if (CANCELLABLE && heap->cancelled_gc()) {
 998       ShenandoahCancelledTerminatorTerminator tt;
 999       while (!terminator->offer_termination(&tt));
1000       return;
1001     }
1002 
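         // Drain completed SATB buffers first, so that entries retired by
         // concurrently running mutators land on this worker's queue before
         // we poll it and steal.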
1003     while (satb_mq_set.completed_buffers_num() > 0) {
1004       satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
1005     }
1006 
1007     uint work = 0;
1008     for (uint i = 0; i < stride; i++) {
1009       if (try_queue(q, t) ||
1010           queues->steal(worker_id, &seed, t)) {
1011         do_task<T>(q, cl, live_data, &t);
1012         work++;
1013       } else {
1014         break;
1015       }
1016     }
1017 
1018     if (work == 0) {
1019       // No work encountered in current stride, try to terminate.
1020       ShenandoahTerminationTimingsTracker term_tracker(worker_id);
1021       if (terminator->offer_termination()) return;
1022     }
1023   }
1024 }
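     // A minimal, self-contained sketch of the same poll/steal/stride shape in
     // portable C++, kept as a comment. SimpleQueue, try_pop and steal are
     // hypothetical stand-ins, not the VM types; termination is simplified to
     // "no work found", whereas the real loop above offers termination to the
     // terminator protocol instead.
     //
     //   #include <deque>
     //   #include <vector>
     //
     //   typedef std::deque<int> SimpleQueue;  // stands in for ShenandoahObjToScanQueue
     //
     //   static bool try_pop(SimpleQueue& q, int& t) {
     //     if (q.empty()) return false;
     //     t = q.back(); q.pop_back();          // LIFO pop from own queue
     //     return true;
     //   }
     //
     //   static bool steal(std::vector<SimpleQueue>& qs, size_t self, int& t) {
     //     for (size_t i = 0; i < qs.size(); i++) {
     //       if (i == self || qs[i].empty()) continue;
     //       t = qs[i].front(); qs[i].pop_front();  // FIFO steal from a victim
     //       return true;
     //     }
     //     return false;
     //   }
     //
     //   static void mark_loop(std::vector<SimpleQueue>& qs, size_t self, size_t stride) {
     //     for (;;) {
     //       size_t work = 0;
     //       for (size_t i = 0; i < stride; i++) {  // bounded stride between checks
     //         int task;
     //         if (try_pop(qs[self], task) || steal(qs, self, task)) {
     //           // process(task) would go here, possibly pushing new tasks
     //           work++;
     //         } else {
     //           break;
     //         }
     //       }
     //       if (work == 0) return;  // real code offers termination here instead
     //     }
     //   }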
1025 
1026 bool ShenandoahConcurrentMark::process_references() const {
1027   return _heap->process_references();
1028 }
1029 
1030 bool ShenandoahConcurrentMark::unload_classes() const {
1031   return _heap->unload_classes();
1032 }
1033 
1034 bool ShenandoahConcurrentMark::claim_codecache() {
1035   assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
1036   return _claimed_codecache.try_set();
1037 }
1038 
1039 void ShenandoahConcurrentMark::clear_claim_codecache() {
1040   assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
1041   _claimed_codecache.unset();
1042 }