/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

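// Root scanning closure for the init-mark pause: marks the object referenced
// from a root slot and pushes it onto the worker-local task queue. The
// UPDATE_REFS template parameter selects whether root slots are also updated
// to point to the forwarded objects as they are visited.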
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

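// Gang task for root scanning at the init-mark pause. Each worker picks up
// its reserved task queue and feeds the discovered objects into it through
// ShenandoahInitMarkRootsClosure; see do_work() below for the root set
// selection rationale.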
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahAllRootScanner* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahAllRootScanner* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This will allow us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there could be embedded classes/oops in the generated code,
    //      which we will never visit during mark. Without code cache invalidation, as in (a),
    //      we risk executing that code cache blob, and crashing.
    if (heap->unload_classes()) {
      _rp->strong_roots_do(worker_id, oops);
    } else {
      _rp->roots_do(worker_id, oops);
    }
  }
};

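// Gang task that updates all roots to point to their forwarded objects,
// using ShenandoahRootUpdater with an always-true is-alive predicate.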
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater*  _root_updater;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootUpdater* root_updater) :
    AbstractGangTask("Shenandoah update roots task"),
    _root_updater(root_updater) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    AlwaysTrueClosure always_true;
    _root_updater->roots_do<AlwaysTrueClosure, ShenandoahUpdateRefsClosure>(worker_id, &always_true, &cl);
  }
};

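// Gang task driving concurrent marking: each worker joins the suspendible
// thread set, optionally scans code roots, then runs the cancellable marking
// loop, with string deduplication if it is enabled.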
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

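// Flushes per-thread SATB buffers into the marking queues. Threads are
// claimed with the claim parity protocol, so that each buffer is processed
// by exactly one worker.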
class ShenandoahSATBThreadsClosure : public ThreadClosure {
private:
  ShenandoahConcMarkSATBBufferClosure* _satb_cl;
  int _thread_parity;

public:
  ShenandoahSATBThreadsClosure(ShenandoahConcMarkSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

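// Gang task for the final-mark pause: drains the remaining SATB buffers,
// re-checks code roots for degenerated cycles that bypassed the concurrent
// phase, and completes the marking loop non-cancellably.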
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    // First drain remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter. Adds about 1ms to
    // full-gc.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahConcMarkSATBBufferClosure cl(q);
      ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    if (heap->is_degenerated_gc_in_progress()) {
      // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
      // let's check here.
      _cm->concurrent_scan_code_roots(worker_id, rp);
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

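// Scan roots at the init-mark pause, in RESOLVE mode when the heap still
// contains forwarded objects, and in the cheaper NONE mode otherwise.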
void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahAllRootScanner root_proc(nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references: the heap is stable, so we can save time
    // by not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

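// Update roots at a pause. Whether the code cache is included depends on the
// calling phase; concurrent and final update-refs skip it, while full and
// degenerated GC update it as well.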
void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahGCPhase phase(root_phase);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootUpdater root_updater(nworkers, root_phase, update_code_cache);
  ShenandoahUpdateRootsTask update_roots(&root_updater);
  _heap->workers()->run_task(&update_roots);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

class ShenandoahUpdateThreadRootsTask : public AbstractGangTask {
private:
  ShenandoahThreadRoots           _thread_roots;
  ShenandoahPhaseTimings::Phase   _phase;
public:
  ShenandoahUpdateThreadRootsTask(bool is_par, ShenandoahPhaseTimings::Phase phase) :
    AbstractGangTask("Shenandoah Update Thread Roots"),
    _thread_roots(is_par),
    _phase(phase) {
    ShenandoahHeap::heap()->phase_timings()->record_workers_start(_phase);
  }

  ~ShenandoahUpdateThreadRootsTask() {
    ShenandoahHeap::heap()->phase_timings()->record_workers_end(_phase);
  }

  void work(uint worker_id) {
    ShenandoahUpdateRefsClosure cl;
    _thread_roots.oops_do(&cl, NULL, worker_id);
  }
};

void ShenandoahConcurrentMark::update_thread_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahGCPhase phase(root_phase);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::clear();
#endif

  WorkGang* workers = _heap->workers();
  bool is_par = workers->active_workers() > 1;

  ShenandoahUpdateThreadRootsTask task(is_par, root_phase);
  workers->run_task(&task);

#if COMPILER2_OR_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

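// Set up one task queue per worker (at least one), and size the SATB buffers.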
void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  ShenandoahBarrierSet::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
}

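// Scan code roots concurrently, if enabled. The entire code cache is claimed
// wholesale by a single worker (see claim_codecache()) and scanned under
// CodeCache_lock. With class unloading the scan is skipped, since init-mark
// scans the strong code roots in that mode (see ShenandoahInitMarkRootsTask).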
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So, we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

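// Entry point for concurrent marking: enables reference discovery if needed,
// installs the is-alive closure for the reference processor, and runs the
// cancellable marking task on all active workers.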
void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

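// Final-mark pause work: finish draining the queues and SATB buffers, then
// process weak references, weak roots, class unloading or string table
// cleanup, string dedup cleanup, and finally resize metaspace.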
void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // It does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    StrongRootsScope scope(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  weak_roots_work();

  // And finally finish class unloading
  if (_heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(full_gc);
  } else {
    ShenandoahIsAliveSelector alive;
    StringTable::unlink(alive.is_alive_closure());
  }
  if (ShenandoahStringDedup::is_enabled()) {
    ShenandoahIsAliveSelector alive;
    BoolObjectClosure* is_alive = alive.is_alive_closure();
    ShenandoahStringDedup::unlink_or_oops_do(is_alive, NULL, false);
  }
  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

  // Resize Metaspace
  MetaspaceGC::compute_new_size();
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

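// Keep-alive closures used during reference processing: they mark through the
// referent slot. The plain variant leaves the slot as-is (NONE), while the
// Update variant also updates the slot with the forwarded value (SIMPLE).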
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahWeakAssertNotForwardedClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
#ifdef ASSERT
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      shenandoah_assert_not_forwarded(p, obj);
    }
#endif
  }

public:
  ShenandoahWeakAssertNotForwardedClosure() {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

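// Parallel reference processing support: a proxy that adapts the reference
// processor's ProcessTask to a gang task, selecting forwarding-aware is-alive
// and keep-alive closures when the heap still has forwarded objects.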
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

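// Process weak references at the final-mark pause, under either the normal
// or the full-gc timing phases.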
void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we will miss marking JNI Weak refs then, see implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

// Process leftover weak oops: update them if needed, or assert they do not
// need updating otherwise.
// The weak processor API requires us to visit the oops, even if we are not
// doing anything to them.
void ShenandoahConcurrentMark::weak_roots_work() {
  OopClosure* keep_alive = &do_nothing_cl;
#ifdef ASSERT
  ShenandoahWeakAssertNotForwardedClosure verify_cl;
  keep_alive = &verify_cl;
#endif
  ShenandoahIsAliveClosure is_alive;
  WeakProcessor::weak_oops_do(&is_alive, keep_alive);
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_process_termination =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_termination :
          ShenandoahPhaseTimings::weakrefs_termination;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in RP. They share queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker phase_term(phase_process_termination);

    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

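// Support closures for concurrent precleaning: yield to cancellation
// requests, and drain the marking queues to complete leftover precleaning
// work.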
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    }
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out during
  // reference discovery, and the bulk of work is done there. This phase processes
  // leftovers that missed the initial filtering, i.e. when the referent was marked
  // alive after the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

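// Dispatch the marking loop to a statically specialized closure for the
// current mode. Class unloading (metadata), forwarded objects (update-refs)
// and string dedup each double the closure count, hence the 2x2x2 dispatch
// below.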
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

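// Core marking loop. First drain any extra queues claimed beyond this
// worker's own, then alternate between draining SATB buffers, popping from
// the local queue, and stealing from other queues, offering termination once
// a whole stride finds no work.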
template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
  int seed = 17;
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  ShenandoahConcMarkSATBBufferClosure drain_satb(q);
  ShenandoahSATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, &seed, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}