/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

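// Root-marking closure for the init-mark pause: marks each object reachable
// from a root and pushes it onto the worker-local mark queue via
// mark_through_ref(). UPDATE_REFS selects whether the root slot itself is
// also updated to the to-space copy; STRING_DEDUP selects whether String
// candidates are enqueued for deduplication.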
template<UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    if (ShenandoahStringDedup::is_enabled()) {
      ShenandoahInitMarkRootsClosure<UPDATE_REFS, ENQUEUE_DEDUP> mark_cl(q);
      do_work(heap, &mark_cl, worker_id);
    } else {
      ShenandoahInitMarkRootsClosure<UPDATE_REFS, NO_DEDUP> mark_cl(q);
      do_work(heap, &mark_cl, worker_id);
    }
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This will allow us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there could be embedded classes/oops in the generated code,
    //      which we will never visit during mark. Without code cache invalidation, as in (a),
    //      we risk executing that code cache blob, and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
    //      pause time.

    CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
    MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
    OopClosure* weak_oops = _process_refs ? NULL : oops;

    ResourceMark m;
    if (heap->unload_classes()) {
      _rp->process_strong_roots(oops, weak_oops, &clds_cl, NULL, &blobs_cl, NULL, worker_id);
    } else {
      if (ShenandoahConcurrentScanCodeRoots) {
        CodeBlobClosure* code_blobs = NULL;
#ifdef ASSERT
        ShenandoahAssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
        // If conc code cache evac is disabled, code cache should have only to-space ptrs.
        // Otherwise, it should have to-space ptrs only if mark does not update refs.
        if (!heap->has_forwarded_objects()) {
          code_blobs = &assert_to_space;
        }
#endif
        _rp->process_all_roots(oops, weak_oops, &clds_cl, code_blobs, NULL, worker_id);
      } else {
        _rp->process_all_roots(oops, weak_oops, &clds_cl, &blobs_cl, NULL, worker_id);
      }
    }
  }
};

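// Updates all root slots in place to point at the to-space copies of their
// referents. Updating the code cache is optional: update_roots() requests it
// only on the degenerated and full GC paths, where nmethods may still hold
// from-space pointers.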
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  const bool _update_code_cache;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp),
    _update_code_cache(update_code_cache) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);

    CodeBlobClosure* code_blobs;
    CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
#ifdef ASSERT
    ShenandoahAssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
#endif
    if (_update_code_cache) {
      code_blobs = &update_blobs;
    } else {
      code_blobs =
        DEBUG_ONLY(&assert_to_space)
        NOT_DEBUG(NULL);
    }
    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, NULL, worker_id);
  }
};

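// Drives the bulk of concurrent marking: each worker joins the suspendible
// thread set (so it can yield to safepoints), optionally scans code cache
// roots, then drains and steals from the mark queues until termination.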
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

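// Flushes thread-local SATB buffers into the given closure: Java threads
// drain their own buffers, while the VM thread drains the shared queue.
// Each thread is claimed once per parity round, so parallel workers never
// process the same thread twice.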
class ShenandoahSATBThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

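// Task for the final-mark pause: drains the remaining SATB buffers, then
// finishes marking by draining whatever is left in the queues. Not
// cancellable, since it runs at a safepoint.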
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    // First drain the remaining SATB buffers.
    // Note that this is not strictly necessary for mark-compact, but since
    // draining requires a StrongRootsScope around the task, we need to claim
    // the threads anyway, and performance-wise it does not really matter.
    // Adds about 1ms to full-gc.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
    // let's check here.
    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // Can save time not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahGCPhase phase(root_phase);

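  // Compiled frames may hold derived pointers (interior pointers computed from
  // a base oop). These are recorded while roots are processed and re-adjusted
  // once the base oops have been updated.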
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(_heap, nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
  _heap->workers()->run_task(&update_roots);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

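// Scans code cache roots concurrently, outside the init-mark pause. The claim
// flag ensures only one worker performs the scan per cycle, and CodeCache_lock
// keeps the set of blobs stable while scanning. Not needed when classes are
// unloaded, because code roots are then scanned strongly at init mark.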
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // It does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    StrongRootsScope scope(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  // And finally finish class unloading
  if (_heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

  // Resize Metaspace
  MetaspaceGC::compute_new_size();
}

// Weak Reference Closures
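// Completes marking on behalf of the reference processor: re-runs the mark
// loop to close the transitive set over newly kept-alive objects. RP may
// invoke this closure several times, hence the option to reset the
// terminator for reuse.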
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

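// Keep-alive closure for reference processing at a safepoint: marks the
// referent through the regular marking path, leaving the slot untouched
// (NONE update mode).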
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

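// Variant used when the heap contains forwarded objects: in addition to
// marking, it updates the visited slot (SIMPLE update mode).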
class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

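// Visits leftover weak oop slots after reference processing: updates each
// slot to the forwarded value if needed, asserting that the result is marked.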
class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

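// Debug counterpart for the stable-heap case: asserts that no weak oop slot
// still points at a forwarded object.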
class ShenandoahWeakAssertNotForwardedClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      shenandoah_assert_not_forwarded(p, obj);
    }
  }

public:
  ShenandoahWeakAssertNotForwardedClosure() {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

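// Adapts a ReferenceProcessor ProcessTask to a WorkGang task, supplying the
// is-alive/keep-alive/complete-gc closures that match the current heap state
// (forwarded objects present or not).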
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we will miss marking JNI Weak refs then, see implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_process_termination =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_termination :
          ShenandoahPhaseTimings::weakrefs_termination;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in RP. They share queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker phase_term(phase_process_termination);

    // Process leftover weak oops: update them, if needed (using parallel version),
    // or assert they do not need updating (using serial version) otherwise.
    // Weak processor API requires us to visit the oops, even if we are not doing
    // anything to them.
    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);

      ShenandoahWeakUpdateClosure cl;
      WeakProcessor::weak_oops_do(workers, is_alive.is_alive_closure(), &cl, 1);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);

      ShenandoahWeakAssertNotForwardedClosure cl;
      WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

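// Yield closure for precleaning: tells the reference processor to bail out
// as soon as GC has been cancelled.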
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

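// Keep-alive closure for precleaning, which runs concurrently with mutators:
// marks through the referent using the CONCURRENT update mode for slots.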
class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    }
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references whose referents
  // are alive. Note that ReferenceProcessor already filters out such references
  // during discovery, so the bulk of the work is done there. This phase processes
  // the leftovers that missed the initial filtering, i.e. references whose
  // referents were marked alive after the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

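// The code cache scan is claimed at most once per cycle: the first worker to
// call claim_codecache() wins, and mark_roots() clears the flag for the next
// cycle.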
bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}