/*
 * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/referenceProcessorPhaseTimes.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahOopClosures.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shared/weakProcessor.hpp"
#include "code/codeCache.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "logging/logStream.hpp"

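// Root-marking closure for the init-mark pause: marks the object referenced
// from each visited root slot and pushes it onto this worker's scan queue.
// The UPDATE_REFS template parameter selects whether the visited slot is also
// updated to point to the to-space copy of a forwarded object.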
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, false /* string dedup */>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->next_marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataVisitingOopIterateClosure(rp),
  _queue(q),
  _heap(ShenandoahHeap::heap()),
  _mark_context(_heap->next_marking_context())
{ }

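// Gang task for the init-mark pause: every worker scans its share of the root
// set with ShenandoahInitMarkRootsClosure, seeding the per-worker task queues
// for the concurrent marking phase that follows.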
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    CLDToOopClosure cldCl(&mark_cl);
    MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);

    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because generated code may embed classes/oops that we would never
    //      otherwise visit during mark. Without the code cache invalidation done in (a),
    //      we would risk executing such a code cache blob, and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves init mark
    //      pause time.

    ResourceMark m;
    if (heap->concurrentMark()->unload_classes()) {
      _rp->process_strong_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, NULL, &blobsCl, NULL, worker_id);
    } else {
      if (ShenandoahConcurrentScanCodeRoots) {
        CodeBlobClosure* code_blobs = NULL;
#ifdef ASSERT
        ShenandoahAssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
        // If concurrent code cache evacuation is disabled, the code cache should have only to-space ptrs.
        // Otherwise, it should have to-space ptrs only if mark does not update refs.
        if (!heap->has_forwarded_objects()) {
          code_blobs = &assert_to_space;
        }
#endif
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, NULL, worker_id);
      } else {
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, NULL, worker_id);
      }
    }
  }
};

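// Gang task that walks all roots and updates each reference to point to the
// to-space copy of its object. Updating the code cache is optional; which
// phases need it is decided in ShenandoahConcurrentMark::update_roots() below.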
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  const bool _update_code_cache;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp),
    _update_code_cache(update_code_cache) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahWorkerSession worker_session(worker_id);

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
    CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
#ifdef ASSERT
    ShenandoahAssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
#endif
    if (_update_code_cache) {
      code_blobs = &update_blobs;
    } else {
      code_blobs =
        DEBUG_ONLY(&assert_to_space)
        NOT_DEBUG(NULL);
    }
    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, NULL, worker_id);
  }
};

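// Concurrent marking task: each worker joins the suspendible thread set (so
// marking can yield to safepoints), optionally scans code roots, and then runs
// the cancellable marking loop over its queue, stealing work when idle.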
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
    AbstractGangTask("Root Region Scan"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
  }

  void work(uint worker_id) {
    ShenandoahWorkerSession worker_session(worker_id);
    SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    jushort* live_data = _cm->get_liveness(worker_id);
    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   _cm->unload_classes(),
                   _update_refs,
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

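// Thread closure that claims each thread once per claim parity and flushes its
// SATB buffer: the thread-local queue for Java threads, the shared queue for
// the VM thread.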
class ShenandoahSATBThreadsClosure : public ThreadClosure {
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

 public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

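// Final-mark pause task: drains the remaining SATB buffers (claiming threads
// under the StrongRootsScope set up by the caller), then runs the marking loop
// to completion. Not cancellable: at this point marking must finish.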
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;
  bool _unload_classes;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator,
                             bool update_refs, bool unload_classes, bool dedup_string) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator),
    _update_refs(update_refs), _unload_classes(unload_classes), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    ShenandoahWorkerSession worker_session(worker_id);
    // First drain remaining SATB buffers.
    // Notice that this is not strictly necessary for mark-compact. But since
    // it requires a StrongRootsScope around the task, we need to claim the
    // threads, and performance-wise it doesn't really matter. Adds about 1ms to
    // full-gc.
    {
      ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
      ShenandoahSATBBufferClosure cl(q);
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&cl));
      ShenandoahSATBThreadsClosure tc(&cl);
      Threads::threads_do(&tc);
    }

    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
      shenandoah_assert_rp_isalive_installed();
    } else {
      rp = NULL;
    }

    // Degenerated cycle may bypass concurrent cycle, so code roots might not be scanned,
    // let's check here.
    _cm->concurrent_scan_code_roots(worker_id, rp, _update_refs);
    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   _unload_classes,
                   _update_refs,
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

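// Scans all roots at a safepoint and seeds the task queues. The RESOLVE flavor
// is used while the heap still contains forwarded objects, otherwise the
// cheaper NONE flavor avoids chasing forwarding pointers.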
void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Just check");

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // Can save time not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::init_mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  mark_roots(ShenandoahPhaseTimings::scan_roots);
}

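// Updates all root references to their to-space copies at a safepoint. The
// phase determines whether the code cache must be updated as well.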
void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      update_code_cache = false;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::degen_gc_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
  heap->workers()->run_task(&update_roots);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

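// Sets up marking state for the given number of workers: one task queue per
// worker, plus a per-worker liveness array holding one counter per heap region.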
void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  size_t num_regions = ShenandoahHeap::heap()->num_regions();
  _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
  for (uint worker = 0; worker < workers; worker++) {
     _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, num_regions, mtGC);
  }
}

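// Concurrent code cache scan: the first worker to claim the code cache scans
// all blobs under CodeCache_lock. This is skipped when classes are unloaded,
// because init mark has already scanned the strong code roots in that mode.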
void ShenandoahConcurrentMark::concurrent_scan_code_roots(uint worker_id, ReferenceProcessor* rp, bool update_refs) {
  if (ShenandoahConcurrentScanCodeRoots && claim_codecache()) {
    ShenandoahObjToScanQueue* q = task_queues()->queue(worker_id);
    if (!unload_classes()) {
      MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
      if (update_refs) {
        ShenandoahMarkResolveRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
        CodeCache::blobs_do(&blobs);
      }
    }
  }
}

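// Drives concurrent marking: enables reference discovery when needed, installs
// the is-alive closure for the reference processor, and runs the concurrent
// marking task under the selected terminator.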
void ShenandoahConcurrentMark::mark_from_roots() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();
  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  bool update_refs = sh->has_forwarded_objects();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // enable ("weak") refs discovery
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(sh->soft_ref_policy()->should_clear_all_soft_refs());
  }

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), is_alive.is_alive_closure());

  task_queues()->reserve(nworkers);

  {
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::conc_termination);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentMarkingTask task(this, &terminator, update_refs);
      workers->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentMarkingTask task(this, &terminator, update_refs);
      workers->run_task(&task);
    }
  }

  assert(task_queues()->is_empty() || sh->cancelled_gc(), "Should be empty when not cancelled");
}

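// Final-mark entry point: finishes marking, then fixes up roots if the heap
// still contains forwarded objects.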
void ShenandoahConcurrentMark::finish_mark_from_roots() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* sh = ShenandoahHeap::heap();

  shared_finish_mark_from_roots(/* full_gc = */ false);

  if (sh->has_forwarded_objects()) {
    update_roots(ShenandoahPhaseTimings::update_roots);
  }
}

void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* sh = ShenandoahHeap::heap();

  uint nworkers = sh->workers()->active_workers();

  // Finally mark everything else we've got in our queues during the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it's shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                            ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                            ShenandoahPhaseTimings::finish_queues);
    task_queues()->reserve(nworkers);

    shenandoah_assert_rp_isalive_not_installed();
    ShenandoahIsAliveSelector is_alive;
    ReferenceProcessorIsAliveMutator fix_isalive(sh->ref_processor(), is_alive.is_alive_closure());

    ShenandoahTerminationTracker termination_tracker(full_gc ?
                                                     ShenandoahPhaseTimings::full_gc_mark_termination :
                                                     ShenandoahPhaseTimings::termination);

    StrongRootsScope scope(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(),
        unload_classes(), full_gc && ShenandoahStringDedup::is_enabled());
      sh->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(),
        unload_classes(), full_gc && ShenandoahStringDedup::is_enabled());
      sh->workers()->run_task(&task);
    }
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we're done marking everything, we process weak references.
  if (process_references()) {
    weak_refs_work(full_gc);
  }

  // And finally finish class unloading
  if (unload_classes()) {
    sh->unload_classes_and_cleanup_tables(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
  TASKQUEUE_STATS_ONLY(task_queues()->print_taskqueue_stats());
  TASKQUEUE_STATS_ONLY(task_queues()->reset_taskqueue_stats());
}

// Weak Reference Closures
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ParallelTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    assert(scm->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(_worker_id, _terminator, rp,
                   false, // not cancellable
                   scm->unload_classes(),
                   sh->has_forwarded_objects(),
                   false);  // do not do strdedup

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

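// Keep-alive closures used during reference processing: they mark referents
// that reference processing decides to keep alive. The Update variant (SIMPLE
// update mode) also updates the visited slot to the to-space copy, for use
// when the heap contains forwarded objects.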
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, false /* string dedup */>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->next_marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, false /* string dedup */>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->next_marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

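// Adapts a ReferenceProcessor ProcessTask to a gang task. Each worker selects
// the forwarding-aware or plain is-alive/keep-alive pair, depending on whether
// the heap has forwarded objects, and then drains its marking stack.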
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ResourceMark rm;
    HandleMark hm;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrentMark();
    ShenandoahPushWorkerQueuesScope scope(_workers, cm->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      if (nworkers == 1) {
        proc_task_proxy.work(0);
      } else {
        _workers->run_task(&proc_task_proxy);
      }
    } else {
      ParallelTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      if (nworkers == 1) {
        proc_task_proxy.work(0);
      } else {
        _workers->run_task(&proc_task_proxy);
      }
    }
  }
};

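// Stop-the-world weak reference processing, wrapped in the appropriate timing phase.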
void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = sh->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we will miss marking JNI Weak refs then, see implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ReferenceProcessor* rp = sh->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_process_termination =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_termination :
          ShenandoahPhaseTimings::weakrefs_termination;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(sh->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(sh->gc_timer(), rp->num_queues());

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker phase_term(phase_process_termination);

    // We don't use the single-threaded closures, because the executor
    // distinguishes this case itself. Assert that we should never actually
    // get there.
    ShouldNotReachHereBoolObjectClosure should_not_reach_here_is_alive;
    ShouldNotReachHereOopClosure should_not_reach_here_keep_alive;
    ShouldNotReachHereVoidClosure should_not_reach_here_complete;
    rp->process_discovered_references(&should_not_reach_here_is_alive,
                                      &should_not_reach_here_keep_alive,
                                      &should_not_reach_here_complete,
                                      &executor, &pt);

    // Closures instantiated here are only needed for the single-threaded path in WeakProcessor.
    // They share queue 0 for tracking work, which simplifies implementation.
    // TODO: As soon as WeakProcessor becomes MT-capable, these closures would become
    // unnecessary, and could be removed.
    uint serial_worker_id = 0;
    ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);
    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      WeakProcessor::weak_oops_do(&is_alive, &keep_alive);
    }

    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }
}

class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    assert(scm->process_references(), "why else would we be here?");
    ParallelTaskTerminator terminator(1, scm->task_queues());

    ReferenceProcessor* rp = sh->ref_processor();
    shenandoah_assert_rp_isalive_installed();

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   scm->unload_classes(),
                   sh->has_forwarded_objects(),
                   false); // do not do strdedup
  }
};

class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, false /* string dedup */>(p, _heap, _queue, _mark_context);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _heap(ShenandoahHeap::heap()),
    _mark_context(_heap->next_marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p)       { do_oop_work(p); }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This filters out references whose referents
  // are already alive. Note that ReferenceProcessor already filters these out
  // during reference discovery, and the bulk of the work is done there. This
  // phase processes the leftovers that missed the initial filtering, i.e.
  // references whose referents were marked alive after the reference was
  // discovered by the RP.

  assert(process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Shortcut if no references were discovered to avoid winding up threads.
  if (!rp->has_discovered_references()) {
    return;
  }

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahIsAliveSelector is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, is_alive.is_alive_closure());

  // Interrupt on cancelled GC
  ShenandoahCancelledGCYieldClosure yield;

  assert(task_queues()->is_empty(), "Should be empty");

  ShenandoahPrecleanCompleteGCClosure complete_gc;
  if (sh->has_forwarded_objects()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(get_queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  } else {
    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveClosure keep_alive(get_queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  }

  assert(task_queues()->is_empty(), "Should be empty");
}


void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  ShenandoahBarrierSet::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

void ShenandoahConcurrentMark::clear_queue(ShenandoahObjToScanQueue *q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}

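// Selects the marking closure for this worker from the eight combinations of
// class_unload x update_refs x strdedup, runs the marking loop with it, and
// then flushes the per-region liveness data this worker accumulated.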
template <bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp,
                                                 bool class_unload, bool update_refs, bool strdedup) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld = get_liveness(w);
  Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (class_unload) {
    if (update_refs) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsMetadataDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  } else {
    if (update_refs) {
      if (strdedup) {
        ShenandoahMarkUpdateRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    } else {
      if (strdedup) {
        ShenandoahMarkRefsDedupClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE>(&cl, ld, w, t);
      }
    }
  }

  for (uint i = 0; i < _heap->num_regions(); i++) {
    ShenandoahHeapRegion* r = _heap->get_region(i);
    jushort live = ld[i];
    if (live > 0) {
      r->increase_live_data_gc_words(live);
    }
  }
}

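// The marking loop itself: first drains any extra queues left over from
// previous phases, then repeatedly pops local tasks, drains SATB buffers,
// steals from other queues, and offers termination when no work remains.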
template <class T, bool CANCELLABLE>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
         "Need to reserve proper number of queues: reserved: %u, active: %u", queues->get_reserved(), heap->workers()->active_workers());

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t)) {
        do_task<T>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);

  ShenandoahSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_gc_and_yield()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t) ||
          queues->steal(worker_id, t)) {
        do_task<T>(q, cl, live_data, &t);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No work encountered in current stride, try to terminate.
      // Need to leave the STS here otherwise it might block safepoints.
      SuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      if (terminator->offer_termination()) return;
    }
  }
}

bool ShenandoahConcurrentMark::process_references() const {
  return _heap->process_references();
}

bool ShenandoahConcurrentMark::unload_classes() const {
  return _heap->unload_classes();
}

bool ShenandoahConcurrentMark::claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  return _claimed_codecache.try_set();
}

void ShenandoahConcurrentMark::clear_claim_codecache() {
  assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
  _claimed_codecache.unset();
}

jushort* ShenandoahConcurrentMark::get_liveness(uint worker_id) {
  return _liveness_local[worker_id];
}