/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/weakProcessor.inline.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/handles.inline.hpp"

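// Root scanning closure for the init-mark pause: marks the object referenced
// from the root slot and pushes it onto the worker's queue. UPDATE_REFS selects
// whether the root slot is also updated with the forwarded value on the way.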
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

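// Init-mark root scan task: every worker grabs its reserved queue and feeds
// the roots it claims into the marking closure above.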
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because the generated code may embed classes/oops that we would never
    //      otherwise visit during mark. Without code cache invalidation, as in (a), we would
    //      risk executing such a code cache blob, and crashing.
    if (heap->unload_classes()) {
      _rp->strong_roots_do(worker_id, oops);
    } else {
      _rp->roots_do(worker_id, oops);
    }
  }
};

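// Root update task: walks all roots and updates them to point to the
// up-to-date copies of their referents.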
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater*  _root_updater;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater) :
    AbstractGangTask("Shenandoah update roots task"),
    _root_updater(root_updater) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    AlwaysTrueClosure always_true;
    _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
  }
};

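// Concurrent mark task: each worker scans the code roots (only one worker ends
// up claiming that job), then runs the cancellable marking loop until
// termination or cancellation.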
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Concurrent Marking"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

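// Claims each thread once, then applies the given SATB buffer closure to the
// thread's local SATB queue and empties it.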
class ShenandoahSATBThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  uintx _claim_token;

public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _claim_token(Threads::thread_claim_token()) {}

  void do_thread(Thread* thread) {
    if (thread->claim_threads_do(true, _claim_token)) {
      ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl);
    }
  }
};

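// Final mark task: drains the remaining SATB buffers (completed and
// thread-local), then finishes marking by draining the task queues, with
// work stealing.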
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    // First drain the remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since it
    // requires a StrongRootsScope around the task, we need to claim the threads,
    // and performance-wise it does not really matter: it adds about 1 ms to a full GC.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    if (heap->is_degenerated_gc_in_progress()) {
      // A degenerated cycle may bypass the concurrent cycle, so code roots might not
      // have been scanned yet; cover that case here.
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahAllRootScanner root_proc(nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // We can save time by not walking through the forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahGCPhase phase(root_phase);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootUpdater root_updater(nworkers, root_phase, update_code_cache);
  ShenandoahUpdateRootsTask update_roots(&root_updater);
  _heap->workers()->run_task(&update_roots);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

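// Updates only the thread (stack) roots, recording worker start/end timings
// for the given phase around the work.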
class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  ShenandoahThreadRoots           _thread_roots;
  ShenandoahPhaseTimings::Phase   _phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _thread_roots(is_par),
    _phase(phase) {
    ShenandoahHeap::heap()->phase_timings()->record_workers_start(_phase);
  }

  ~ShenandoahUpdateThreadRootsTask() {
    ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
  }

  void work(uint worker_id) {
    ShenandoahUpdateRefsClosure cl;
    _thread_roots.oops_do(&cl, NULL, worker_id);
  }
};

void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);
#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

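// Scans the code cache for embedded oops. The code cache is claimed once per
// cycle, so only the first worker to get here does the actual scan.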
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLocker mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // It does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it is shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    StrongRootsScope scope(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process the weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  _heap->parallel_cleaning(full_gc);

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

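// Keep-alive closure for reference processing at a safepoint: marks through
// the referenced object without touching the slot (NONE update mode), for use
// when the heap has no forwarded objects.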
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

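// As above, but uses the SIMPLE update mode, which also updates the slot with
// the forwarded value, for use when the heap has forwarded objects.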
class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

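// Updates a weak root slot with the forwarded value, if any, and asserts that
// whatever remains there is either NULL or marked.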
class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

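// Adapts a ReferenceProcessor ProcessTask to an AbstractGangTask, choosing the
// is-alive/keep-alive closure pair that matches the current heap state.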
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

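// Executor handed to the ReferenceProcessor so that it can run its processing
// tasks on the Shenandoah worker gang.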
class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we would then miss marking JNI weak refs; see the implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_process_termination =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_termination :
          ShenandoahPhaseTimings::weakrefs_termination;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in RP. They share queue 0 for tracking work, which
  // simplifies the implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker phase_term(phase_process_termination);

    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

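// Yield closure for precleaning: asks to return as soon as the GC has been
// cancelled.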
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

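// Keep-alive closure for precleaning: marks through the referent using the
// CONCURRENT update mode, since precleaning runs outside a safepoint.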
class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

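// Single-threaded precleaning task: runs RP precleaning with the
// is-alive/keep-alive pair that matches the current heap state.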
class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    }
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This filters out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out at
  // discovery time, which is where the bulk of the work is done. This phase
  // processes the leftovers that missed the initial filtering, i.e. the cases
  // when the referent was marked alive after the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

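// Resolves the marking closure from the (unload classes) x (has forwarded
// objects) x (string dedup) combination, then runs the shared marking loop
// with it.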
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

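// The shared marking loop: first drains any claimed-over queues (there can be
// more queues than workers), then alternates between draining SATB buffers,
// popping/stealing tasks, and offering termination when a stride comes up empty.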
template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in the current stride, try to terminate.
      // Need to leave the STS here, otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}