/*
 * Copyright (c) 2013, 2015, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc_implementation/shared/parallelCleaning.hpp"
#include "gc_implementation/shenandoah/brooksPointer.hpp"
#include "gc_implementation/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahHeap.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahRootProcessor.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.hpp"
#include "gc_implementation/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc_implementation/shenandoah/shenandoahUtils.hpp"
#include "gc_implementation/shenandoah/shenandoahWorkGroup.hpp"
#include "gc_implementation/shenandoah/shenandoah_specialized_oop_closures.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/referenceProcessor.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

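// Marks objects referenced from roots during the init-mark pause. Depending on
// UPDATE_REFS, the root reference may also be updated to point to the to-space
// copy before the object is pushed onto the worker's mark queue; string dedup
// is never attempted from roots (NO_DEDUP).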
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(NULL),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(dq),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

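// Scans the selected root set in parallel during the init-mark pause, seeding
// the per-worker mark queues (see the rationale comment in work() below).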
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, err_msg("Queue has not been reserved for worker id: %u", worker_id));

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    CLDToOopClosure cldCl(&mark_cl);
    MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);

    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This will allow us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there could be embedded classes/oops in the generated code,
    //      which we would never visit during mark. Without code cache invalidation, as in (a),
    //      we risk executing such a code cache blob, and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
    //      pause time.

    ResourceMark m;
    if (heap->unload_classes()) {
      _rp->process_strong_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, NULL, &blobsCl, NULL, worker_id);
    } else {
      if (ShenandoahConcurrentScanCodeRoots) {
        CodeBlobClosure* code_blobs = NULL;
#ifdef ASSERT
        ShenandoahAssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
        // If concurrent code cache evacuation is disabled, the code cache should have only
        // to-space pointers. Otherwise, it has to-space pointers only if mark does not update refs.
        if (!heap->has_forwarded_objects()) {
          code_blobs = &assert_to_space;
        }
#endif
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, NULL, worker_id);
      } else {
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, NULL, worker_id);
      }
    }
  }
};

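// Walks all roots in parallel and updates any reference that still points to a
// forwarded object, so that it refers to the to-space copy.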
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  const bool _update_code_cache;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp),
    _update_code_cache(update_code_cache) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
    CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
#ifdef ASSERT
    ShenandoahAssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
#endif
    if (_update_code_cache) {
      code_blobs = &update_blobs;
    } else {
      code_blobs =
        DEBUG_ONLY(&assert_to_space)
        NOT_DEBUG(NULL);
    }
    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, NULL, worker_id);
  }
};

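// Runs the bulk of concurrent marking: each worker optionally scans code cache
// roots first, then drains and steals mark queues until termination or cancellation.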
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahWorkerSession worker_session(worker_id);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

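// Visits each Java thread (and the shared VM-thread SATB queue), claiming every
// thread exactly once via the strong-roots parity, and flushes its SATB buffer
// into the given closure.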
class ShenandoahSATBThreadsClosure : public ThreadClosure {
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

 public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(SharedHeap::heap()->strong_roots_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

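// Final-mark pause task: drains the remaining per-thread SATB buffers, then
// finishes marking from whatever is left in the task queues.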
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    // First drain remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter. Adds about 1ms to
    // full-gc.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // A degenerated cycle may bypass the concurrent cycle, so code roots might not
    // have been scanned yet; check here.
    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // Can save time not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

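// Fixes up root references after evacuation or degeneration. Whether code cache
// roots also need updating depends on the phase we are called from (see the
// switch below).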
void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  COMPILER2_PRESENT(DerivedPointerTable::clear());

  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
  heap->workers()->run_task(&update_roots);

  COMPILER2_PRESENT(DerivedPointerTable::update_pointers());
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
}

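// Concurrent code cache scan: claimed by at most one worker per cycle, and only
// needed when classes are not unloaded (with class unloading, init-mark already
// scanned the strong code roots).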
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();
  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/, true);
    rp->setup_policy(sh->collector_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentMarkingTask task(this, &terminator);
      workers->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentMarkingTask task(this, &terminator);
      workers->run_task(&task);
    }
  }

  assert(task_queues()->is_empty() || sh->cancelled_gc(), "Should be empty when not cancelled");
  if (!sh->cancelled_gc()) {
    TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* sh = ShenandoahHeap::heap();

  uint nworkers = sh->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // It does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    SharedHeap::StrongRootsScope scope(sh, true);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalMarkingTask task(this, &terminator, full_gc && ShenandoahStringDedup::is_enabled());
      sh->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalMarkingTask task(this, &terminator, full_gc && ShenandoahStringDedup::is_enabled());
      sh->workers()->run_task(&task);
    }
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  // And finally finish class unloading
  if (_heap->unload_classes()) {
    sh->unload_classes_and_cleanup_tables(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}

// Weak Reference Closures
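// complete_gc closure for the ReferenceProcessor: drains the marking queues
// after the keep-alive closure has pushed newly reachable referents. With
// reset_terminator, the terminator can be reused across repeated invocations
// on the single-threaded path (see weak_refs_work_doit).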
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ParallelTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

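// Keep-alive closures for the ReferenceProcessor: mark the referent and push it
// onto the queue. The Update variant (SIMPLE mode) additionally updates the
// reference to point to the to-space copy, for use when the heap has forwarded
// objects.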
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

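// Runs a ReferenceProcessor ProcessTask in parallel, picking is-alive and
// keep-alive closures according to whether the heap has forwarded objects.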
class ShenandoahRefProcTaskProxy : public AbstractGangTask {

private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;
public:

  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {

private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:

  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

 604 
 605 class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
 606 
 607 private:
 608   WorkGang* _workers;
 609 
 610 public:
 611 
 612   ShenandoahRefProcTaskExecutor(WorkGang* workers) :
 613     _workers(workers) {
 614   }
 615 
 616   // Executes a task using worker threads.
 617   void execute(ProcessTask& task) {
 618     assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
 619 
 620     // Shortcut execution if task is empty.
 621     // This should be replaced with the generic ReferenceProcessor shortcut,
 622     // see JDK-8181214, JDK-8043575, JDK-6938732.
 623     if (task.is_empty()) {
 624       return;
 625     }
 626 
 627     ShenandoahHeap* heap = ShenandoahHeap::heap();
 628     ShenandoahConcurrentMark* cm = heap->concurrent_mark();
 629     uint nworkers = _workers->active_workers();
 630     cm->task_queues()->reserve(nworkers);
 631     if (UseShenandoahOWST) {
 632       ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
 633       ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
 634       _workers->run_task(&proc_task_proxy);
 635     } else {
 636       ParallelTaskTerminator terminator(nworkers, cm->task_queues());
 637       ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
 638       _workers->run_task(&proc_task_proxy);
 639     }
 640   }
 641 
 642   void execute(EnqueueTask& task) {
 643     ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
 644     _workers->run_task(&enqueue_task_proxy);
 645   }
 646 };
 647 
 648 
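// Entry point for weak reference processing at final mark: sets up phase timing,
// delegates to weak_refs_work_doit(), and verifies that no discovered references
// remain afterwards.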
void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = sh->ref_processor();
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ReferenceProcessor* rp = sh->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_enqueue =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_enqueue :
          ShenandoahPhaseTimings::weakrefs_enqueue;

  ShenandoahPhaseTimings::Phase phase_process_termination =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_termination :
          ShenandoahPhaseTimings::weakrefs_termination;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(sh->collector_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in the RP. They share queue 0 for tracking work, which
  // simplifies implementation. Since the RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker phase_term(phase_process_termination);

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        NULL, sh->shenandoah_policy()->tracer()->gc_id());
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        NULL, sh->shenandoah_policy()->tracer()->gc_id());
    }

    assert(task_queues()->is_empty(), "Should be empty");
  }

  {
    ShenandoahGCPhase phase(phase_enqueue);
    rp->enqueue_discovered_references(&executor);
  }
}
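// Yield closure that lets precleaning bail out as soon as the GC is cancelled.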
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ParallelTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

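// Keep-alive closure for precleaning. It uses CONCURRENT update mode since
// precleaning runs while mutators are active; presumably the reference update
// has to be performed atomically in this mode (see mark_through_ref).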
class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out during
  // reference discovery, so the bulk of the work is done there. This phase
  // processes the leftovers that missed the initial filtering, i.e. when the
  // referent was marked alive after the reference was discovered by the RP.

  ShenandoahHeap* sh = ShenandoahHeap::heap();

  assert(sh->process_references(), "sanity");

  ReferenceProcessor* rp = sh->ref_processor();
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Interrupt on cancelled GC
  ShenandoahCancelledGCYieldClosure yield;

  assert(task_queues()->is_empty(), "Should be empty");

  ShenandoahPrecleanCompleteGCClosure complete_gc;
  if (sh->has_forwarded_objects()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(get_queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL, sh->shenandoah_policy()->tracer()->gc_id());
  } else {
    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveClosure keep_alive(get_queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL, sh->shenandoah_policy()->tracer()->gc_id());
  }

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, err_msg("No reserved queue for worker id: %u", worker_id));
  return _task_queues->queue(worker_id);
}

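// Dispatches to the appropriate specialization of mark_loop_work, based on
// class unloading, forwarded objects, and string dedup settings (see the TODO
// below).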
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

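// Core marking loop: first drain any extra queues beyond the active worker
// count, then alternate between SATB draining, local pops/steals, and
// termination offers.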
template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
  int seed = 17;
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
    "Need to reserve proper number of queues");

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->cancelled_gc()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();

  /*
   * Normal marking loop: alternate between draining SATB buffers, popping from
   * the local queue (or stealing from other queues), and offering termination
   * once a full stride finds no work.
   */
  while (true) {
    if (CANCELLABLE && heap->cancelled_gc()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, &seed, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      if (terminator->offer_termination()) return;
    }
  }
}

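// The code cache is claimed by at most one worker per marking cycle; the claim
// flag is cleared again at init-mark (see mark_roots).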
bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}