/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/strongRootsScope.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

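// Root-marking closure for init-mark: pushes each discovered root oop onto the
// worker's task queue via mark_through_ref, optionally resolving forwarded
// references depending on UPDATE_REFS.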
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

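// Parallel task that scans roots at the init-mark pause and seeds the marking
// task queues with the objects they reference.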
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
public:
  ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This will allow us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because the generated code may embed classes/oops that we would otherwise
    //      never visit during mark. Without code cache invalidation, as in (a), we risk
    //      executing such a code cache blob, and crashing.
    if (heap->unload_classes()) {
      _rp->strong_roots_do(worker_id, oops);
    } else {
      _rp->roots_do(worker_id, oops);
    }
  }
};

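// Parallel task that updates all root references to point to the new copies of
// evacuated objects. Used by degenerated and full GC, where roots may still
// reference forwarded objects.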
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater*  _root_updater;
  bool                    _check_alive;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater, bool check_alive) :
    AbstractGangTask("Shenandoah update roots task"),
    _root_updater(root_updater),
    _check_alive(check_alive) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    if (_check_alive) {
      ShenandoahForwardedIsAliveClosure is_alive;
      _root_updater->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>(worker_id, &is_alive, &cl);
    } else {
      AlwaysTrueClosure always_true;
      _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
    }
  }
};

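// Parallel task that drives the bulk of concurrent marking: each worker scans
// code roots once, then runs the cancellable marking loop until all workers
// agree to terminate or the GC is cancelled.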
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  TaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Concurrent Marking"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

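// Thread closure for final-mark: drains each thread's SATB buffer and,
// depending on the supplied closures, scans thread stacks and/or nmethods for
// the remaining roots.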
class ShenandoahSATBAndRemarkCodeRootsThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* const _satb_cl;
  OopClosure*                  const _cl;
  MarkingCodeBlobClosure*      const _code_cl;
  uintx _claim_token;
public:
  ShenandoahSATBAndRemarkCodeRootsThreadsClosure(ShenandoahSATBBufferClosure* satb_cl,
                                                 OopClosure* cl, MarkingCodeBlobClosure* code_cl) :
    _satb_cl(satb_cl), _cl(cl), _code_cl(code_cl),
    _claim_token(Threads::thread_claim_token()) {}

  void do_thread(Thread* thread) {
    if (thread->claim_threads_do(true, _claim_token)) {
      ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
      if (_cl != NULL) {
        // This doesn't appear to add very much to final-mark latency. If that ever becomes a problem,
        // we can attempt to trim it to scan only the actual thread stacks (skipping handles, monitors, etc.),
        // and within those, only compiled frames. We can also templatize thread scans to avoid virtual
        // calls and inline the closures instead.
        ResourceMark rm;
        thread->oops_do(_cl, _code_cl);
      } else if (_code_cl != NULL && thread->is_Java_thread()) {
        // In theory it should not be necessary to explicitly walk the nmethods to find roots for
        // concurrent marking. However, oops reachable from nmethods have very complex lifecycles:
        // * Alive if on the stack of an executing method
        // * Weakly reachable otherwise
        // Some objects reachable from nmethods, such as the class loader (or klass_holder) of the
        // receiver, should be live by the SATB invariant, but other oops recorded in nmethods may
        // behave differently.
        JavaThread* jt = (JavaThread*)thread;
        jt->nmethods_do(_code_cl);
      }
    }
  }
};

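// Parallel task for final-mark: drains the remaining SATB buffers and thread
// roots, then finishes marking by draining the task queues to completion.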
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  TaskTerminator*           _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // First drain the remaining SATB buffers.
    // Note that this is not strictly necessary for mark-compact, but since it
    // requires a StrongRootsScope around the task, we need to claim the threads
    // anyway, and performance-wise it doesn't really matter: it adds about 1ms
    // to a full GC.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);

      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));

      bool do_nmethods = heap->unload_classes() && !ShenandoahConcurrentRoots::can_do_concurrent_class_unloading();
      if (heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure resolve_mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&resolve_mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahAggressiveReferenceDiscovery ? &resolve_mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
      } else {
        ShenandoahMarkRefsClosure mark_cl(q, rp);
        MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);
        ShenandoahSATBAndRemarkCodeRootsThreadsClosure tc(&cl,
                                                          ShenandoahAggressiveReferenceDiscovery ? &mark_cl : NULL,
                                                          do_nmethods ? &blobsCl : NULL);
        Threads::threads_do(&tc);
      }
    }

    if (heap->is_degenerated_gc_in_progress()) {
      // A degenerated cycle may bypass the concurrent cycle, in which case code
      // roots have not been scanned yet, so scan them here.
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

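// Scans all roots at the init-mark pause and seeds the marking queues,
// resolving forwarded objects along the way if the heap still has them.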
void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Cannot use more workers than there are task queues");

  ShenandoahAllRootScanner root_proc(nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // Can save time not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc);
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

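// Updates all root references at a safepoint, for degenerated and full GC.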
void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(root_phase == ShenandoahPhaseTimings::full_gc_roots ||
         root_phase == ShenandoahPhaseTimings::degen_gc_update_roots,
         "Only for these phases");

  ShenandoahGCPhase phase(root_phase);

  bool check_alive = root_phase == ShenandoahPhaseTimings::degen_gc_update_roots;

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootUpdater root_updater(nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_updater, check_alive);
  _heap->workers()->run_task(&update_roots);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

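// Parallel task that updates references held in thread roots (stacks, handles).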
class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  ShenandoahThreadRoots           _thread_roots;
  ShenandoahPhaseTimings::Phase   _phase;
  ShenandoahGCWorkerPhase         _worker_phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _thread_roots(is_par),
    _phase(phase),
    _worker_phase(phase) {}

  void work(uint worker_id) {
    ShenandoahUpdateRefsClosure cl;
    _thread_roots.oops_do(&cl, NULL, worker_id);
  }
};

void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;

  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

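// Sets up one task queue per worker. Queues are created up front; each cycle
// reserves the needed number via task_queues()->reserve().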
void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

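// Scans code cache roots once per cycle: the first worker to claim the code
// cache does the scan, all others skip. Only needed when classes are not being
// unloaded; see the rationale in ShenandoahInitMarkRootsTask::do_work.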
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

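// Entry point for concurrent marking: sets up reference discovery, installs
// the is-alive closure, and runs ShenandoahConcurrentMarkingTask on the
// worker gang.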
void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // Enable ("weak") refs discovery.
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    TaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

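// Completes marking at the final-mark safepoint and, if enabled, processes
// weak references afterwards.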
void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    StrongRootsScope scope(nworkers);
    TaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  TaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

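// Keep-alive closures mark the referents that reference processing decides to
// keep alive. The Update variant additionally updates forwarded references,
// for use when the heap has forwarded objects.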
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

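// Gang task wrapper that runs a reference processor ProcessTask in parallel,
// supplying per-worker is-alive, keep-alive, and drain closures.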
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  TaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             TaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    TaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

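// Processes discovered weak references after marking has completed.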
void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we would then miss marking JNI weak refs; see the implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed
  // for the single-threaded path in RP. They share queue 0 for tracking work,
  // which simplifies implementation. Since RP may decide to call complete_gc
  // several times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  TaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);

    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

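// Closures for precleaning: yield when the GC is cancelled, and drain the
// marking queues single-threaded between preclean batches.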
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    TaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    assert(!sh->has_forwarded_objects(), "No forwarded objects expected here");

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This filters out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out during
  // reference discovery, and that is where the bulk of the work is done. This
  // phase processes leftovers that missed the initial filtering, i.e. cases
  // where the referent was marked alive after the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
  return _task_queues->queue(worker_id);
}

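// Dispatches to the mark_loop_work instantiation matching the current cycle:
// class unloading decides whether metadata is visited, forwarded objects
// decide whether references are updated while marking, and strdedup decides
// whether string dedup candidates are enqueued.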
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, TaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

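// The core marking loop: first drains any queues claimed beyond the per-worker
// one, then alternates between draining SATB buffers, popping and stealing
// tasks, and offering termination when a stride finds no work.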
template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, TaskTerminator *terminator) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}