/*
 * Copyright (c) 2013, 2018, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"

#include "gc/shared/gcTimer.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "gc/shared/weakProcessor.inline.hpp"

#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahOopClosures.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahTimingTracker.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"

#include "memory/iterator.inline.hpp"
#include "memory/metaspace.hpp"
#include "memory/resourceArea.hpp"
#include "oops/oop.inline.hpp"

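// Init-mark root closure: marks through each visited reference via
// ShenandoahConcurrentMark::mark_through_ref, pushing newly marked objects
// onto this worker's queue. Reference-updating and string-dedup behavior
// are selected by the template parameters.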
template<UpdateRefsMode UPDATE_REFS, StringDedupMode STRING_DEDUP>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, STRING_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->marking_context())
{ }

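// Init-mark root scanning task: seeds the marking queues from the root set,
// choosing strong-roots-only vs. all-roots scanning based on class unloading
// and concurrent code root scanning settings.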
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrent_mark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    if (ShenandoahStringDedup::is_enabled()) {
      ShenandoahInitMarkRootsClosure<UPDATE_REFS, ENQUEUE_DEDUP> mark_cl(q);
      do_work(heap, &mark_cl, worker_id);
    } else {
      ShenandoahInitMarkRootsClosure<UPDATE_REFS, NO_DEDUP> mark_cl(q);
      do_work(heap, &mark_cl, worker_id);
    }
  }

private:
  void do_work(ShenandoahHeap* heap, OopClosure* oops, uint worker_id) {
    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there could be embedded classes/oops in the generated code,
    //      which we would otherwise never visit during mark. Without code cache invalidation,
    //      as in (a), we risk executing that code cache blob, and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
    //      pause time.

    CLDToOopClosure clds_cl(oops, ClassLoaderData::_claim_strong);
    MarkingCodeBlobClosure blobs_cl(oops, !CodeBlobToOopClosure::FixRelocations);
    OopClosure* weak_oops = _process_refs ? NULL : oops;

    ResourceMark m;
    if (heap->unload_classes()) {
      _rp->process_strong_roots(oops, weak_oops, &clds_cl, NULL, &blobs_cl, NULL, worker_id);
    } else {
      if (ShenandoahConcurrentScanCodeRoots) {
        CodeBlobClosure* code_blobs = NULL;
#ifdef ASSERT
        ShenandoahAssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
        // If conc code cache evac is disabled, code cache should have only to-space ptrs.
        // Otherwise, it should have to-space ptrs only if mark does not update refs.
        if (!heap->has_forwarded_objects()) {
          code_blobs = &assert_to_space;
        }
#endif
        _rp->process_all_roots(oops, weak_oops, &clds_cl, code_blobs, NULL, worker_id);
      } else {
        _rp->process_all_roots(oops, weak_oops, &clds_cl, &blobs_cl, NULL, worker_id);
      }
    }
  }
};

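// Root updating task: walks all roots and updates references to point to
// their to-space copies, optionally fixing up code cache blobs as well.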
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  const bool _update_code_cache;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp),
    _update_code_cache(update_code_cache) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);

    CodeBlobClosure* code_blobs;
    CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
#ifdef ASSERT
    ShenandoahAssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
#endif
    if (_update_code_cache) {
      code_blobs = &update_blobs;
    } else {
      code_blobs =
        DEBUG_ONLY(&assert_to_space)
        NOT_DEBUG(NULL);
    }
    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, NULL, worker_id);
  }
};

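// Concurrent marking task: each worker joins the suspendible thread set,
// optionally scans code roots, then runs the cancellable marking loop.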
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

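// Claims each Java thread (and the VM thread) once, then flushes its SATB
// buffer into the marking queues via the given SATB buffer closure.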
class ShenandoahSATBThreadsClosure : public ThreadClosure {
private:
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

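// Final-mark task: drains the remaining SATB buffers, then finishes the
// marking loop (not cancellable at this point).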
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ShenandoahTaskTerminator* _terminator;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ShenandoahTaskTerminator* terminator, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahHeap* heap = ShenandoahHeap::heap();

    ShenandoahParallelWorkerSession worker_session(worker_id);
    // First drain the remaining SATB buffers.
    // Note that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads anyway, and performance-wise it does not really matter: it adds
    // about 1ms to a full GC.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (heap->process_references()) {
      rp = heap->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // A degenerated cycle may bypass the concurrent cycle, so code roots might
    // not have been scanned yet; check and scan them here.
    _cm->concurrent_scan_code_roots(worker_id, rp);
    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // Can save time not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, _heap->process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to the safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahGCPhase phase(root_phase);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = _heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(_heap, nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
  _heap->workers()->run_task(&update_roots);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!_heap->unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      // TODO: We cannot honor StringDeduplication here, due to lock ranking
      // inversion. So we may miss some deduplication candidates.
      if (_heap->has_forwarded_objects()) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (_heap->process_references()) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // Enable ("weak") refs discovery.
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask task(this, &terminator);
    workers->run_task(&task);
  }

  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty when not cancelled");
}

void ShenandoahConcurrentMark::finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  uint nworkers = _heap->workers()->active_workers();

  // Finish marking everything we have accumulated in our queues during the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it is shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(_heap->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    StrongRootsScope scope(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalMarkingTask task(this, &terminator, ShenandoahStringDedup::is_enabled());
    _heap->workers()->run_task(&task);
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (_heap->process_references()) {
    weak_refs_work(full_gc);
  }

  // And finally finish class unloading.
  if (_heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());

  // Resize Metaspace.
  MetaspaceGC::compute_new_size();
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false,   // not cancellable
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

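// Keep-alive closure for reference processing when the heap holds no forwarded
// objects: marks the referent without updating the reference slot.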
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

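// Keep-alive closure for when forwarded objects may exist: updates the
// reference to the to-space copy (SIMPLE update mode), then marks it.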
class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

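// Updates leftover weak oops to their forwarded copies, asserting that the
// result is marked (or NULL).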
class ShenandoahWeakUpdateClosure : public OopClosure {
private:
  ShenandoahHeap* const _heap;

  template <class T>
  inline void do_oop_work(T* p) {
    oop o = _heap->maybe_update_with_forwarded(p);
    shenandoah_assert_marked_except(p, o, o == NULL);
  }

public:
  ShenandoahWeakUpdateClosure() : _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

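// Asserts that weak oops are not forwarded; used when the heap is stable and
// no reference updates should be necessary.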
class ShenandoahWeakAssertNotForwardedClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      shenandoah_assert_not_forwarded(p, obj);
    }
  }

public:
  ShenandoahWeakAssertNotForwardedClosure() {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

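// Adapts a ReferenceProcessor ProcessTask to a gang task, wiring up the
// appropriate is-alive, keep-alive and complete-gc closures per worker.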
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrent_mark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

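// Runs ReferenceProcessor tasks on the Shenandoah worker gang, pushing the
// marking queues into scope so the workers can use them.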
class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrent_mark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
    ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we would then miss marking JNI Weak refs; see the implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_process_termination =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_termination :
          ShenandoahPhaseTimings::weakrefs_termination;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed for
  // the single-threaded path in RP. They share queue 0 for tracking work, which
  // simplifies the implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker phase_term(phase_process_termination);

    // Process leftover weak oops: update them, if needed (using the parallel version),
    // or otherwise assert they do not need updating (using the serial version).
    // The weak processor API requires us to visit the oops, even if we are not doing
    // anything to them.
    if (_heap->has_forwarded_objects()) {
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);

      ShenandoahWeakUpdateClosure cl;
      WeakProcessor::weak_oops_do(workers, is_alive.is_alive_closure(), &cl, 1);
    } else {
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(is_alive.is_alive_closure(), &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);

      ShenandoahWeakAssertNotForwardedClosure cl;
      WeakProcessor::weak_oops_do(is_alive.is_alive_closure(), &cl);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

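// Yield closure that tells the reference processor to bail out early once
// the GC has been cancelled.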
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

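// Complete-GC closure for precleaning: drains the marking queues with a
// single-threaded, non-cancellable mark loop.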
class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrent_mark();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   false); // do not do strdedup
  }
};

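// Keep-alive closure for precleaning: marks through the reference using the
// CONCURRENT update mode, since precleaning runs while mutators are active.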
class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, NO_DEDUP>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

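// Single-worker task that runs reference precleaning inside a GC worker
// thread, so it gets GCLABs and other worker-local facilities.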
class ShenandoahPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahPrecleanTask(ReferenceProcessor* rp) :
          AbstractGangTask("Precleaning task"),
          _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->concurrent_mark()->get_queue(worker_id);

    ShenandoahCancelledGCYieldClosure yield;
    ShenandoahPrecleanCompleteGCClosure complete_gc;

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(q);
      ResourceMark rm;
      _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                          &complete_gc, &yield,
                                          NULL);
    }
  }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This filters out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out at
  // reference discovery, so the bulk of the work is done there. This phase
  // processes the leftovers that missed the initial filtering, i.e. the cases
  // where the referent was marked alive after the reference was discovered by RP.

  assert(_heap->process_references(), "sanity");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  assert(task_queues()->is_empty(), "Should be empty");

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahPrecleanTask task(rp);
  workers->run_task(&task);

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

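// Dispatches into the templated mark loop, selecting the marking closure based
// on the class unloading, forwarded objects, and string dedup modes.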
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ShenandoahTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = _heap->get_liveness_cache(w);

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (_heap->unload_classes()) {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (_heap->has_forwarded_objects()) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator *terminator) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      ShenandoahCancelledTerminatorTerminator tt;
      ShenandoahSuspendibleThreadSetLeaver stsl(ShenandoahSuspendibleWorkers);
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      ShenandoahCancelledTerminatorTerminator tt;
      ShenandoahSuspendibleThreadSetLeaver stsl(ShenandoahSuspendibleWorkers);
      while (!terminator->offer_termination(&tt));
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(t) ||
          queues->steal(worker_id, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in the current stride, try to terminate.
      // Need to leave the STS here, otherwise it might block safepoints.
      ShenandoahSuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      if (terminator->offer_termination()) return;
    }
  }
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}