/*
 * Copyright (c) 2013, 2020, Red Hat, Inc. All rights reserved.
 * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/weakProcessor.inline.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/strongRootsScope.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

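// Marks objects referenced from GC roots, pushing newly marked objects onto
// the worker-local task queue for later draining. The UPDATE_REFS template
// parameter controls whether root slots are also updated with forwarded values.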
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

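// Parallel task that scans GC roots at the init-mark safepoint and seeds the
// marking queues. Which root sets are scanned depends on class unloading;
// see the rationale in do_work() below.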
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because the generated code may embed classes/oops that we would
    //      otherwise never visit during marking. Without code cache invalidation, as in (a),
    //      we risk executing that code cache blob, and crashing.
    if (heap->unload_classes()) {
      _rp->strong_roots_do(worker_id, oops);
    } else {
      _rp->roots_do(worker_id, oops);
    }
  }
};

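// Parallel task that updates GC roots with the new locations of forwarded
// objects. check_alive is used for degenerated GC, where dead references may
// still be present in the roots and must be filtered out rather than updated.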
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater*  _root_updater;
  bool                    _check_alive;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater, bool check_alive) :
    AbstractGangTask("Shenandoah update roots task"),
    _root_updater(root_updater),
    _check_alive(check_alive) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    if (_check_alive) {
      ShenandoahForwardedIsAliveClosure is_alive;
      _root_updater->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahUpdateRefsClosure>(worker_id, &is_alive, &cl);
    } else {
      AlwaysTrueClosure always_true;
      _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
    }
  }
};

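// Parallel task that runs the concurrent marking loop: each worker joins the
// suspendible thread set, scans code roots if needed, then drains its own
// queue and steals from others until the terminator signals completion.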
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  TaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Concurrent Mark"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

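// Claims each thread once (via the claim token) and flushes its thread-local
// SATB buffer through the given SATB buffer closure.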
class ShenandoahSATBThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  uintx _claim_token;

public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _claim_token(Threads::thread_claim_token()) {}

  void do_thread(Thread* thread) {
    if (thread->claim_threads_do(true, _claim_token)) {
      ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
    }
  }
};

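// Parallel task for the final-mark safepoint: drains the remaining SATB
// buffers, then finishes marking by emptying all task queues.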
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  TaskTerminator*           _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, TaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    // First drain remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter. Adds about 1ms to
    // full-gc.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    if (heap->is_degenerated_gc_in_progress()) {
      // A degenerated cycle may bypass the concurrent cycle, so code roots
      // might not have been scanned yet; check them here.
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Cannot use more workers than there are task queues");

  ShenandoahAllRootScanner root_proc(nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references: the heap is stable, so we can save time
    // by not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
  assert(root_phase == ShenandoahPhaseTimings::full_gc_roots ||
         root_phase == ShenandoahPhaseTimings::degen_gc_update_roots,
         "Only for these phases");

  ShenandoahGCPhase phase(root_phase);

  bool check_alive = root_phase == ShenandoahPhaseTimings::degen_gc_update_roots;

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootUpdater root_updater(nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_updater, check_alive);
  _heap->workers()->run_task(&update_roots);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

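// Parallel task that updates references held in thread stacks and other
// thread roots; worker start/end times are recorded in the phase timings
// by the constructor and destructor.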
class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  ShenandoahThreadRoots           _thread_roots;
  ShenandoahPhaseTimings::Phase   _phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _thread_roots(is_par),
    _phase(phase) {
    ShenandoahHeap::heap()->phase_timings()->record_workers_start(_phase);
  }

  ~ShenandoahUpdateThreadRootsTask() {
    ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
  }

  void work(uint worker_id) {
    ShenandoahUpdateRefsClosure cl;
    _thread_roots.oops_do(&cl, NULL, worker_id);
  }
};

void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;

  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // Enable ("weak") refs discovery.
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    TaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    StrongRootsScope scope(nworkers);
    TaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  TaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, TaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

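// Keep-alive closure for reference processing: marks through the referent
// without updating the slot (NONE update mode), for use when the heap has
// no forwarded objects.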
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

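// Same as above, but also updates the slot with the forwarded object location
// (SIMPLE update mode), for use when the heap has forwarded objects.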
class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

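// Updates a weak slot with the forwarded object location, asserting that any
// non-NULL result is already marked.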
class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

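// Adapts a ReferenceProcessor ProcessTask to a gang task, selecting the
// is-alive/keep-alive closures based on whether the heap has forwarded objects.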
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  TaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             TaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

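// Executes reference-processing tasks on the shared worker gang, reserving
// the marking queues for the workers before running the proxy task.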
class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    TaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we would then miss marking JNI weak refs; see the implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in RP. They share queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  TaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);

    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

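// Yield closure that tells precleaning to bail out as soon as the GC cycle
// has been cancelled.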
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

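// Complete-gc closure for precleaning: drains the marking queues on the
// single worker that runs the preclean task.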
class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    TaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

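// Keep-alive closure for precleaning: marks through the referent with
// CONCURRENT update semantics, since mutators are still running.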
class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

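// Single-worker task that precleans discovered references while mutators are
// still running, yielding if the GC cycle is cancelled.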
class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
    AbstractGangTask("Precleaning task"),
    _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    }
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This filters out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out during
  // reference discovery, and that is where the bulk of the work is done. This
  // phase processes leftovers that missed the initial filtering, i.e. when the
  // referent was marked alive after the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
  return _task_queues->queue(worker_id);
}

template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, TaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, TaskTerminator *terminator) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}