/*
 * Copyright (c) 2013, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahClosures.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

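// Marks objects referenced from root slots during the init-mark pause: each
// visited reference is passed to mark_through_ref(), which marks the object
// and pushes it onto this worker's scan queue. UPDATE_REFS selects how
// forwarded pointers in the root slots are handled.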
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

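// Gang task for the init-mark root scan. Each worker grabs its reserved scan
// queue and feeds the roots it claims into ShenandoahInitMarkRootsClosure.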
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %d", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    do_work(heap, &mark_cl, worker_id);
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because the generated code may embed classes/oops that we would never
    //      visit during mark otherwise. Without the code cache invalidation from (a), we would
    //      risk executing a stale code cache blob and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves init
    //      mark pause time.

    CLDToOopClosure clds_cl(oops);
    MarkingCodeBlobClosure blobs_cl(oops, ! CodeBlobToOopClosure::FixRelocations);
    OopClosure* weak_oops = _process_refs ? NULL : oops;

    ResourceMark m;
    if (heap->unload_classes()) {
      _rp->process_strong_roots(oops, weak_oops, &clds_cl, NULL, &blobs_cl, NULL, worker_id);
    } else {
      if (ShenandoahConcurrentScanCodeRoots) {
        CodeBlobClosure* code_blobs = NULL;
#ifdef ASSERT
        ShenandoahAssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
        // If concurrent code cache evacuation is disabled, the code cache should have only
        // to-space pointers. Otherwise, it should have to-space pointers only if mark does
        // not update refs.
        if (!heap->has_forwarded_objects()) {
          code_blobs = &assert_to_space;
        }
#endif
        _rp->process_all_roots(oops, weak_oops, &clds_cl, code_blobs, NULL, worker_id);
      } else {
        _rp->process_all_roots(oops, weak_oops, &clds_cl, &blobs_cl, NULL, worker_id);
      }
    }
  }
};

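// Gang task that updates root slots to point at the to-space copies of their
// objects; code cache blobs are updated only when requested by the caller.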
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  const bool _update_code_cache;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp),
    _update_code_cache(update_code_cache) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
    CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
#ifdef ASSERT
    ShenandoahAssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
#endif
    if (_update_code_cache) {
      code_blobs = &update_blobs;
    } else {
      code_blobs =
        DEBUG_ONLY(&assert_to_space)
        NOT_DEBUG(NULL);
    }
    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, NULL, worker_id);
  }
};

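// Gang task for concurrent marking: joins the suspendible thread set, scans
// code roots if that work is still unclaimed, then runs the cancellable
// marking loop.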
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

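// Claims each thread once and drains its SATB buffer into the marking queues:
// the thread-local buffer for Java threads, the shared buffer for the VM
// thread.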
class ShenandoahSATBThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

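// Gang task for the final-mark pause: drains the remaining SATB buffers, then
// runs the marking loop to completion (not cancellable at this point).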
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    // First drain the remaining SATB buffers. Note that this is not strictly
    // necessary for mark-compact, but since it requires a StrongRootsScope
    // around the task, we need to claim the threads anyway, and performance-wise
    // it does not really matter: it adds about 1ms to a full GC.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // A degenerated cycle may bypass the concurrent cycle, in which case code
    // roots have not been scanned yet, so check here.
    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

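// Scans the root set at the init-mark pause, seeding the task queues that the
// concurrent marking phase will drain.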
void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references: the heap is stable. We can save time by
    // not walking through the forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

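// Updates the root set at a pause. Whether code cache roots need updating
// depends on the phase that requested the update, see the switch below.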
void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to the safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahGCPhase phase(root_phase);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(_heap, nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
  _heap->workers()->run_task(&update_roots);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

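// Sets up one scan queue per worker and configures the SATB buffer size.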
void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  ShenandoahBarrierSet::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);
}

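// Scans code cache roots concurrently, under the CodeCache_lock. The work is
// claimed by a single worker, and is skipped when classes are being unloaded,
// because then only strong code roots are scanned, at the pause.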
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion, so we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

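// Drives the concurrent marking phase: sets up reference discovery, then runs
// ShenandoahConcurrentMarkingTask until queues are drained or GC is cancelled.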
void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // Enable ("weak") refs discovery.
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

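// Completes marking at a pause: finishes the queues, then processes weak
// references and unloads classes, if requested.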
void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    StrongRootsScope scope(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  // And finally finish class unloading.
  if (_heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

  // Resize Metaspace.
  MetaspaceGC::compute_new_size();
}

// Weak Reference Closures
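// Complete-GC closure for reference processing: drains the marking queues by
// running the marking loop; the terminator may be reset for reuse because RP
// can invoke this closure several times.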
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

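// Keep-alive closure for reference processing when the heap is stable: marks
// through the reference without updating the slot.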
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

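// Keep-alive closure for reference processing when forwarded objects may be
// present: updates the slot to the to-space copy while marking through it.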
class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

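// Updates leftover weak root slots with the forwarded values; the result must
// be either marked or NULL.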
class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

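// Asserts that weak root slots hold no forwarded objects; used when the heap
// is known to be stable.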
class ShenandoahWeakAssertNotForwardedClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      shenandoah_assert_not_forwarded(p, obj);
    }
  }

public:
  ShenandoahWeakAssertNotForwardedClosure() {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

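// Adapts a ReferenceProcessor ProcessTask to a gang task, selecting the
// is-alive/keep-alive closure pair based on whether forwarded objects may be
// present.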
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

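// Task executor handed to the ReferenceProcessor for running reference
// processing in parallel over Shenandoah's worker gang.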
class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

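// Entry point for weak reference processing, at the final-mark pause or
// during full GC.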
void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we would then miss marking JNI weak refs; see the implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

 660 
 661 void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
 662   ReferenceProcessor* rp = _heap->ref_processor();
 663 
 664   ShenandoahPhaseTimings::Phase phase_process =
 665           full_gc ?
 666           ShenandoahPhaseTimings::full_gc_weakrefs_process :
 667           ShenandoahPhaseTimings::weakrefs_process;
 668 
 669   ShenandoahPhaseTimings::Phase phase_process_termination =
 670           full_gc ?
 671           ShenandoahPhaseTimings::full_gc_weakrefs_termination :
 672           ShenandoahPhaseTimings::weakrefs_termination;
 673 
 674   shenandoah_assert_rp_isalive_not_installed();
 675   ShenandoahIsAliveSelector is_alive;
 676   ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());
 677 
 678   WorkGang* workers = _heap->workers();
 679   uint nworkers = workers->active_workers();
 680 
 681   rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
 682   rp->set_active_mt_degree(nworkers);
 683 
 684   assert(task_queues()->is_empty(), "Should be empty");
 685 
 686   // complete_gc and keep_alive closures instantiated here are only needed for
 687   // single-threaded path in RP. They share the queue 0 for tracking work, which
 688   // simplifies implementation. Since RP may decide to call complete_gc several
 689   // times, we need to be able to reuse the terminator.
 690   uint serial_worker_id = 0;
 691   ShenandoahTaskTerminator terminator(1, task_queues());
 692   ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
 693 
 694   ShenandoahRefProcTaskExecutor executor(workers);
 695 
 696   ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
 697 
 698   {
 699     ShenandoahGCPhase phase(phase_process);
 700     ShenandoahTerminationTracker phase_term(phase_process_termination);
 701 
 702     // Process leftover weak oops: update them, if needed, or assert they do not
 703     // need updating otherwise. This JDK version does not have parallel WeakProcessor.
 704     // Weak processor API requires us to visit the oops, even if we are not doing
 705     // anything to them.
 706     if (_heap->has_forwarded_objects()) {
 707       ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
 708       rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
 709                                         &complete_gc, &executor,
 710                                         &pt);
 711 
 712       ShenandoahWeakUpdateClosure cl;
 713       WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
 714     } else {
 715       ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
 716       rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
 717                                         &complete_gc, &executor,
 718                                         &pt);
 719 
 720       ShenandoahWeakAssertNotForwardedClosure cl;
 721       WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
 722     }
 723 
 724     pt.print_all_references();
 725 
 726     assert(task_queues()->is_empty(), "Should be empty");
 727   }
 728 }
 729 
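// Yield closure that makes precleaning bail out once GC is cancelled.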
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

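// Complete-GC closure for precleaning: drains the marking queues
// single-threaded, without cancellation or string dedup.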
class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

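// Keep-alive closure for precleaning: marks through the reference with
// concurrent-safe (CONCURRENT mode) slot updates.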
class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

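// Single-threaded gang task that runs reference precleaning, yielding to
// cancellation requests via ShenandoahCancelledGCYieldClosure.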
class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    }
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into the STW pause makes sense
  // at the end of concurrent mark. It filters out references whose referents
  // are alive. Note that ReferenceProcessor already filters these out during
  // reference discovery, and that is where the bulk of the work is done. This
  // phase processes the leftovers that missed the initial filtering, i.e.
  // references whose referents were marked alive after the reference was
  // discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When the upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %d", worker_id);
  return _task_queues->queue(worker_id);
}

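// Instantiates the right marking closure for the current GC mode (class
// unloading, forwarded objects, string dedup) and runs the marking loop
// with it.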
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nicely with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

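// The marking loop proper: drain the claimed queues first, then alternate
// between draining SATB buffers, popping/stealing tasks, and offering
// termination when a stride finds no work.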
template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
  int seed = 17;
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, &seed, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here, otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(heap);
      if (terminator->offer_termination(&tt)) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}