/*
 * Copyright (c) 2013, 2017, Red Hat, Inc. and/or its affiliates.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"
#include "classfile/stringTable.hpp"
#include "classfile/symbolTable.hpp"
#include "classfile/systemDictionary.hpp"
#include "code/codeCache.hpp"
#include "gc/shared/gcTimer.hpp"
#include "gc/shared/parallelCleaning.hpp"
#include "gc/shared/referenceProcessor.hpp"
#include "gc/shared/strongRootsScope.hpp"
#include "gc/shared/suspendibleThreadSet.hpp"
#include "gc/shared/taskqueue.inline.hpp"
#include "gc/shenandoah/brooksPointer.hpp"
#include "gc/shenandoah/shenandoahBarrierSet.inline.hpp"
#include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoah_specialized_oop_closures.hpp"
#include "logging/logStream.hpp"
#include "memory/iterator.inline.hpp"
#include "oops/oop.inline.hpp"

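// Closure that marks objects referenced from the root set during the init-mark
// pause. Each discovered reference is pushed onto the worker's scan queue via
// mark_through_ref; UPDATE_REFS selects how forwarded references in root slots
// are handled (see mark_roots below).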
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, UPDATE_REFS, false /* string dedup */>(p, _heap, _queue);
  }

public:
  ShenandoahInitMarkRootsClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(NULL),
  _heap(ShenandoahHeap::heap())
{ }

ShenandoahMarkRefsSuperClosure::ShenandoahMarkRefsSuperClosure(ShenandoahObjToScanQueue* q, ShenandoahStrDedupQueue* dq, ReferenceProcessor* rp) :
  MetadataAwareOopClosure(rp),
  _queue(q),
  _dedup_queue(dq),
  _heap(ShenandoahHeap::heap())
{ }

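// Safepoint task that scans the root set and seeds the per-worker mark queues.
// Which roots are visited depends on class unloading and on
// ShenandoahConcurrentScanCodeRoots; see the rationale comment in work().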
template<UpdateRefsMode UPDATE_REFS>
class ShenandoahInitMarkRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  bool _process_refs;
public:
  ShenandoahInitMarkRootsTask(ShenandoahRootProcessor* rp, bool process_refs) :
    AbstractGangTask("Shenandoah init mark roots task"),
    _rp(rp),
    _process_refs(process_refs) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahObjToScanQueueSet* queues = heap->concurrentMark()->task_queues();
    assert(queues->get_reserved() > worker_id, "Queue has not been reserved for worker id: %u", worker_id);

    ShenandoahObjToScanQueue* q = queues->queue(worker_id);
    ShenandoahInitMarkRootsClosure<UPDATE_REFS> mark_cl(q);
    CLDToOopClosure cldCl(&mark_cl);
    MarkingCodeBlobClosure blobsCl(&mark_cl, !CodeBlobToOopClosure::FixRelocations);

    // The rationale for selecting the roots to scan is as follows:
    //   a. With unload_classes = true, we only want to scan the actual strong roots from the
    //      code cache. This allows us to identify the dead classes, unload them, *and*
    //      invalidate the relevant code cache blobs. This can only be done together with
    //      class unloading.
    //   b. With unload_classes = false, we have to nominally retain all the references from the
    //      code cache, because there may be classes/oops embedded in the generated code that
    //      we would otherwise never visit during mark. Without the code cache invalidation
    //      from (a), we would risk executing such a code cache blob, and crashing.
    //   c. With ShenandoahConcurrentScanCodeRoots, we avoid scanning the entire code cache here,
    //      and instead do that in the concurrent phase under the relevant lock. This saves
    //      init mark pause time.

    ResourceMark m;
    if (heap->concurrentMark()->unload_classes()) {
      _rp->process_strong_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, NULL, &blobsCl, NULL, worker_id);
    } else {
      if (ShenandoahConcurrentScanCodeRoots) {
        CodeBlobClosure* code_blobs = NULL;
#ifdef ASSERT
        ShenandoahAssertToSpaceClosure assert_to_space_oops;
        CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
        // If concurrent code cache evacuation is disabled, the code cache should only have
        // to-space pointers. Otherwise, it has to-space pointers only if mark does not
        // update references.
        if (!ShenandoahConcurrentEvacCodeRoots && !heap->has_forwarded_objects()) {
          code_blobs = &assert_to_space;
        }
#endif
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, code_blobs, NULL, worker_id);
      } else {
        _rp->process_all_roots(&mark_cl, _process_refs ? NULL : &mark_cl, &cldCl, &blobsCl, NULL, worker_id);
      }
    }
  }
};

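// Safepoint task that updates root references to point to their to-space
// copies. Code cache roots are visited only when _update_code_cache is set;
// otherwise, debug builds merely verify that the code cache holds no
// from-space pointers.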
class ShenandoahUpdateRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  const bool _update_code_cache;
public:
  ShenandoahUpdateRootsTask(ShenandoahRootProcessor* rp, bool update_code_cache) :
    AbstractGangTask("Shenandoah update roots task"),
    _rp(rp),
    _update_code_cache(update_code_cache) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahUpdateRefsClosure cl;
    CLDToOopClosure cldCl(&cl);

    CodeBlobClosure* code_blobs;
    CodeBlobToOopClosure update_blobs(&cl, CodeBlobToOopClosure::FixRelocations);
#ifdef ASSERT
    ShenandoahAssertToSpaceClosure assert_to_space_oops;
    CodeBlobToOopClosure assert_to_space(&assert_to_space_oops, !CodeBlobToOopClosure::FixRelocations);
#endif
    if (_update_code_cache) {
      code_blobs = &update_blobs;
    } else {
      code_blobs =
        DEBUG_ONLY(&assert_to_space)
        NOT_DEBUG(NULL);
    }
    _rp->process_all_roots(&cl, &cl, &cldCl, code_blobs, NULL, worker_id);
  }
};

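// Concurrent marking task. When ShenandoahConcurrentScanCodeRoots is enabled,
// the first worker to claim the code cache scans it under the CodeCache_lock;
// all workers then run the cancellable marking loop, draining SATB buffers as
// they go, until the terminator signals completion.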
class ShenandoahConcurrentMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;

public:
  ShenandoahConcurrentMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs) :
    AbstractGangTask("Shenandoah Concurrent Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs) {
  }

  void work(uint worker_id) {
    SuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahObjToScanQueue* q = _cm->get_queue(worker_id);
    jushort* live_data = _cm->get_liveness(worker_id);
    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    ReferenceProcessorMaybeNullIsAliveMutator fix_alive(rp, ShenandoahHeap::heap()->is_alive_closure());

    if (ShenandoahConcurrentScanCodeRoots && _cm->claim_codecache()) {
      if (!_cm->unload_classes()) {
        MutexLockerEx mu(CodeCache_lock, Mutex::_no_safepoint_check_flag);
        if (_update_refs) {
          ShenandoahMarkResolveRefsClosure cl(q, rp);
          CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
          CodeCache::blobs_do(&blobs);
        } else {
          ShenandoahMarkRefsClosure cl(q, rp);
          CodeBlobToOopClosure blobs(&cl, !CodeBlobToOopClosure::FixRelocations);
          CodeCache::blobs_do(&blobs);
        }
      }
    }

    _cm->mark_loop(worker_id, _terminator, rp,
                   true, // cancellable
                   true, // drain SATBs as we go
                   true, // count liveness
                   _cm->unload_classes(),
                   _update_refs,
                   ShenandoahStringDedup::is_enabled()); // perform string dedup
  }
};

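// Final marking task, run at the final-mark safepoint: drains the remaining
// SATB buffers, then finishes draining the mark queues without further
// cancellation checks, optionally deduplicating strings along the way.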
class ShenandoahFinalMarkingTask : public AbstractGangTask {
private:
  ShenandoahConcurrentMark* _cm;
  ParallelTaskTerminator* _terminator;
  bool _update_refs;
  bool _count_live;
  bool _unload_classes;
  bool _dedup_string;

public:
  ShenandoahFinalMarkingTask(ShenandoahConcurrentMark* cm, ParallelTaskTerminator* terminator, bool update_refs,
    bool count_live, bool unload_classes, bool dedup_string = false) :
    AbstractGangTask("Shenandoah Final Marking"), _cm(cm), _terminator(terminator), _update_refs(update_refs),
    _count_live(count_live), _unload_classes(unload_classes), _dedup_string(dedup_string) {
  }

  void work(uint worker_id) {
    // First drain the remaining SATB buffers.
    // Note that this is not strictly necessary for mark-compact. But since it
    // requires a StrongRootsScope around the task anyway, we need to claim the
    // threads, and performance-wise it does not really matter: it adds about
    // 1ms to a full GC.
    _cm->drain_satb_buffers(worker_id, true);

    ReferenceProcessor* rp;
    if (_cm->process_references()) {
      rp = ShenandoahHeap::heap()->ref_processor();
    } else {
      rp = NULL;
    }

    ReferenceProcessorMaybeNullIsAliveMutator fix_alive(rp, ShenandoahHeap::heap()->is_alive_closure());

    _cm->mark_loop(worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs, already drained
                   _count_live,
                   _unload_classes,
                   _update_refs,
                   _dedup_string);

    assert(_cm->task_queues()->is_empty(), "Should be empty");
  }
};

void ShenandoahConcurrentMark::mark_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();

  assert(nworkers <= task_queues()->size(), "Cannot use more workers than there are task queues");

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
  task_queues()->reserve(nworkers);

  if (heap->has_forwarded_objects()) {
    ShenandoahInitMarkRootsTask<RESOLVE> mark_roots(&root_proc, process_references());
    workers->run_task(&mark_roots);
  } else {
    // No need to update references, which means the heap is stable.
    // We can save time by not walking through forwarding pointers.
    ShenandoahInitMarkRootsTask<NONE> mark_roots(&root_proc, process_references());
    workers->run_task(&mark_roots);
  }

  if (ShenandoahConcurrentScanCodeRoots) {
    clear_claim_codecache();
  }
}

void ShenandoahConcurrentMark::init_mark_roots() {
  assert(Thread::current()->is_VM_thread(), "can only do this in VMThread");
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Set up reference processing and class unloading for this cycle.
  ShenandoahCollectorPolicy* policy = heap->shenandoahPolicy();
  set_process_references(policy->process_references());
  set_unload_classes(policy->unload_classes());

  mark_roots(ShenandoahPhaseTimings::scan_roots);
}

void ShenandoahConcurrentMark::update_roots(ShenandoahPhaseTimings::Phase root_phase) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  bool update_code_cache = true; // initialize to the safer value
  switch (root_phase) {
    case ShenandoahPhaseTimings::update_roots:
    case ShenandoahPhaseTimings::final_update_refs_roots:
      // If the code cache was evacuated concurrently, we need to update code cache roots.
      update_code_cache = ShenandoahConcurrentEvacCodeRoots;
      break;
    case ShenandoahPhaseTimings::full_gc_roots:
    case ShenandoahPhaseTimings::final_partial_gc_work:
    case ShenandoahPhaseTimings::final_traversal_update_roots:
      update_code_cache = true;
      break;
    default:
      ShouldNotReachHere();
  }

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahGCPhase phase(root_phase);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif

  uint nworkers = heap->workers()->active_workers();

  ShenandoahRootProcessor root_proc(heap, nworkers, root_phase);
  ShenandoahUpdateRootsTask update_roots(&root_proc, update_code_cache);
  heap->workers()->run_task(&update_roots);

#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahConcurrentMark::initialize(uint workers) {
  _heap = ShenandoahHeap::heap();

  uint num_queues = MAX2(workers, 1U);

  _task_queues = new ShenandoahObjToScanQueueSet((int) num_queues);

  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }

  JavaThread::satb_mark_queue_set().set_buffer_size(ShenandoahSATBBufferSize);

  size_t num_regions = ShenandoahHeap::heap()->num_regions();
  _liveness_local = NEW_C_HEAP_ARRAY(jushort*, workers, mtGC);
  for (uint worker = 0; worker < workers; worker++) {
    _liveness_local[worker] = NEW_C_HEAP_ARRAY(jushort, num_regions, mtGC);
  }
}

void ShenandoahConcurrentMark::mark_from_roots() {
  ShenandoahHeap* sh = ShenandoahHeap::heap();
  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  bool update_refs = sh->has_forwarded_objects();

  ShenandoahGCPhase conc_mark_phase(ShenandoahPhaseTimings::conc_mark);

  if (process_references()) {
    ReferenceProcessor* rp = sh->ref_processor();
    rp->set_active_mt_degree(nworkers);

    // Enable ("weak") refs discovery.
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(sh->is_full_gc_in_progress()); // snapshot the soft ref policy to be used in this cycle
  }

  task_queues()->reserve(nworkers);

  if (UseShenandoahOWST) {
    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask markingTask(this, &terminator, update_refs);
    workers->run_task(&markingTask);
  } else {
    ParallelTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentMarkingTask markingTask(this, &terminator, update_refs);
    workers->run_task(&markingTask);
  }

  assert(task_queues()->is_empty() || sh->cancelled_concgc(), "Should be empty when not cancelled");
  if (!sh->cancelled_concgc()) {
    TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
  }

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());
}

void ShenandoahConcurrentMark::finish_mark_from_roots() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* sh = ShenandoahHeap::heap();

  TASKQUEUE_STATS_ONLY(reset_taskqueue_stats());

  shared_finish_mark_from_roots(/* full_gc = */ false);

  if (sh->has_forwarded_objects()) {
    update_roots(ShenandoahPhaseTimings::update_roots);
  }

  TASKQUEUE_STATS_ONLY(print_taskqueue_stats());
}

void ShenandoahConcurrentMark::shared_finish_mark_from_roots(bool full_gc) {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

  ShenandoahHeap* sh = ShenandoahHeap::heap();

  uint nworkers = sh->workers()->active_workers();

  // Finish marking everything we have got in our queues during the previous steps.
  // This does two different things for concurrent vs. mark-compact GC:
  // - For concurrent GC, it starts with empty task queues, drains the remaining
  //   SATB buffers, and then completes the marking closure.
  // - For mark-compact GC, it starts out with the task queues seeded by the initial
  //   root scan, and completes the closure, thus marking through all live objects.
  // The implementation is the same, so it is shared here.
  {
    ShenandoahGCPhase phase(full_gc ?
                               ShenandoahPhaseTimings::full_gc_mark_finish_queues :
                               ShenandoahPhaseTimings::finish_queues);
    bool count_live = !(ShenandoahNoLivenessFullGC && full_gc); // we do not need liveness data for full GC
    task_queues()->reserve(nworkers);

    StrongRootsScope scope(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(), count_live,
        unload_classes(), full_gc && ShenandoahStringDedup::is_enabled());
      sh->workers()->run_task(&task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalMarkingTask task(this, &terminator, sh->has_forwarded_objects(), count_live,
        unload_classes(), full_gc && ShenandoahStringDedup::is_enabled());
      sh->workers()->run_task(&task);
    }
  }

  assert(task_queues()->is_empty(), "Should be empty");

  // When we are done marking everything, we process weak references.
  if (process_references()) {
    weak_refs_work(full_gc);
  }

  // And finally finish class unloading.
  if (unload_classes()) {
    sh->unload_classes_and_cleanup_tables(full_gc);
  }

  assert(task_queues()->is_empty(), "Should be empty");
}

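// Thread closure that flushes thread-local SATB buffers into the mark queues.
// Each Java thread (and the shared SATB queue, via the VM thread) is claimed
// exactly once per marking cycle using the thread claim parity.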
class ShenandoahSATBThreadsClosure : public ThreadClosure {
  ShenandoahSATBBufferClosure* _satb_cl;
  int _thread_parity;

 public:
  ShenandoahSATBThreadsClosure(ShenandoahSATBBufferClosure* satb_cl) :
    _satb_cl(satb_cl),
    _thread_parity(Threads::thread_claim_parity()) {}

  void do_thread(Thread* thread) {
    if (thread->is_Java_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread* jt = (JavaThread*)thread;
        jt->satb_mark_queue().apply_closure_and_empty(_satb_cl);
      }
    } else if (thread->is_VM_thread()) {
      if (thread->claim_oops_do(true, _thread_parity)) {
        JavaThread::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
      }
    }
  }
};

void ShenandoahConcurrentMark::drain_satb_buffers(uint worker_id, bool remark) {
  ShenandoahObjToScanQueue* q = get_queue(worker_id);
  ShenandoahSATBBufferClosure cl(q);

  SATBMarkQueueSet& satb_mq_set = JavaThread::satb_mark_queue_set();
  while (satb_mq_set.apply_closure_to_completed_buffer(&cl)) {}

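  // At remark, also flush the per-thread SATB buffers that have not been
  // retired to the completed-buffers list yet.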
  if (remark) {
    ShenandoahSATBThreadsClosure tc(&cl);
    Threads::threads_do(&tc);
  }
}

#if TASKQUEUE_STATS
void ShenandoahConcurrentMark::print_taskqueue_stats_hdr(outputStream* const st) {
  st->print_raw_cr("GC Task Stats");
  st->print_raw("thr "); TaskQueueStats::print_header(1, st); st->cr();
  st->print_raw("--- "); TaskQueueStats::print_header(2, st); st->cr();
}

void ShenandoahConcurrentMark::print_taskqueue_stats() const {
  if (!log_develop_is_enabled(Trace, gc, task, stats)) {
    return;
  }
  Log(gc, task, stats) log;
  ResourceMark rm;
  LogStream ls(log.trace());
  outputStream* st = &ls;
  print_taskqueue_stats_hdr(st);

  TaskQueueStats totals;
  const uint n = _task_queues->size();
  for (uint i = 0; i < n; ++i) {
    st->print(UINT32_FORMAT_W(3), i);
    _task_queues->queue(i)->stats.print(st);
    st->cr();
    totals += _task_queues->queue(i)->stats;
  }
  st->print("tot "); totals.print(st); st->cr();
  DEBUG_ONLY(totals.verify());
}

void ShenandoahConcurrentMark::reset_taskqueue_stats() {
  const uint n = task_queues()->size();
  for (uint i = 0; i < n; ++i) {
    task_queues()->queue(i)->stats.reset();
  }
}
#endif // TASKQUEUE_STATS

// Weak Reference Closures
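// Complete-GC closure for reference processing: re-runs the marking loop
// (without SATB draining) to mark everything that became reachable through
// kept-alive referents. The terminator may be reset for reuse, because the
// reference processor can invoke complete_gc several times on its serial path.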
class ShenandoahCMDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ParallelTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahCMDrainMarkingStackClosure(uint worker_id, ParallelTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    assert(scm->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();

    ReferenceProcessorIsAliveMutator fix_alive(rp, ShenandoahHeap::heap()->is_alive_closure());

    scm->mark_loop(_worker_id, _terminator, rp,
                   false, // not cancellable
                   false, // do not drain SATBs
                   true,  // count liveness
                   scm->unload_classes(),
                   sh->has_forwarded_objects());

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

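// Keep-alive closure used when the heap holds no forwarded objects: marks the
// referent without touching the reference slot (NONE update mode).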
class ShenandoahCMKeepAliveClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, NONE, false /* string dedup */>(p, _heap, _queue);
  }

public:
  ShenandoahCMKeepAliveClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

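// Keep-alive closure used when the heap may hold forwarded objects: marks the
// referent and updates the slot to the to-space copy (SIMPLE update mode).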
class ShenandoahCMKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, SIMPLE, false /* string dedup */>(p, _heap, _queue);
  }

public:
  ShenandoahCMKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

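// Gang task wrapper around a ReferenceProcessor ProcessTask. Picks the
// is-alive/keep-alive closure pair based on whether the heap currently
// contains forwarded objects.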
class ShenandoahRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ParallelTaskTerminator* _terminator;

public:
  ShenandoahRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                             ParallelTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahCMDrainMarkingStackClosure complete_gc(worker_id, _terminator);
    if (heap->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(heap->concurrentMark()->get_queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

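// Gang task wrapper around a ReferenceProcessor EnqueueTask, which moves
// discovered references to the pending list in parallel.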
class ShenandoahRefEnqueueTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::EnqueueTask& _enqueue_task;

public:
  ShenandoahRefEnqueueTaskProxy(AbstractRefProcTaskExecutor::EnqueueTask& enqueue_task) :
    AbstractGangTask("Enqueue reference objects in parallel"),
    _enqueue_task(enqueue_task) {
  }

  void work(uint worker_id) {
    _enqueue_task.work(worker_id);
  }
};

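// Executor that the ReferenceProcessor calls back into to run its process
// and enqueue tasks on the Shenandoah worker gang.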
class ShenandoahRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahRefProcTaskExecutor(WorkGang* workers) :
    _workers(workers) {
  }

  // Executes a task using worker threads.
  void execute(ProcessTask& task) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    // Shortcut execution if task is empty.
    // This should be replaced with the generic ReferenceProcessor shortcut,
    // see JDK-8181214, JDK-8043575, JDK-6938732.
    if (task.is_empty()) {
      return;
    }

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* cm = heap->concurrentMark();
    uint nworkers = _workers->active_workers();
    cm->task_queues()->reserve(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    } else {
      ParallelTaskTerminator terminator(nworkers, cm->task_queues());
      ShenandoahRefProcTaskProxy proc_task_proxy(task, &terminator);
      _workers->run_task(&proc_task_proxy);
    }
  }

  void execute(EnqueueTask& task) {
    ShenandoahRefEnqueueTaskProxy enqueue_task_proxy(task);
    _workers->run_task(&enqueue_task_proxy);
  }
};

void ShenandoahConcurrentMark::weak_refs_work(bool full_gc) {
  assert(process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();

  ShenandoahPhaseTimings::Phase phase_root =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs :
          ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = sh->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we would then miss marking JNI Weak refs; see the implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit(full_gc);

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

void ShenandoahConcurrentMark::weak_refs_work_doit(bool full_gc) {
  ShenandoahHeap* sh = ShenandoahHeap::heap();

  assert(!sh->is_concurrent_partial_in_progress(), "cannot process weakrefs during conc-partial yet");

  ReferenceProcessor* rp = sh->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_process :
          ShenandoahPhaseTimings::weakrefs_process;

  ShenandoahPhaseTimings::Phase phase_enqueue =
          full_gc ?
          ShenandoahPhaseTimings::full_gc_weakrefs_enqueue :
          ShenandoahPhaseTimings::weakrefs_enqueue;

  ReferenceProcessorIsAliveMutator fix_alive(rp, sh->is_alive_closure());

  WorkGang* workers = sh->workers();
  uint nworkers = workers->active_workers();

  // Set up the collector policy for softref cleaning.
  bool clear_soft_refs = sh->collector_policy()->use_should_clear_all_soft_refs(true /* bogus arg */);
  log_develop_debug(gc, ref)("clearing soft refs: %s", BOOL_TO_STR(clear_soft_refs));
  rp->setup_policy(clear_soft_refs);
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // The complete_gc and keep_alive closures instantiated here are only needed
  // for the single-threaded path in RP. They share queue 0 for tracking work,
  // which simplifies implementation. Since RP may decide to call complete_gc
  // several times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ParallelTaskTerminator terminator(1, task_queues());
  ShenandoahCMDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);

  ShenandoahRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(sh->gc_timer(), rp->num_q());

  {
    ShenandoahGCPhase phase(phase_process);

    if (sh->has_forwarded_objects()) {
      ShenandoahForwardedIsAliveClosure is_alive;
      ShenandoahCMKeepAliveUpdateClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    } else {
      ShenandoahIsAliveClosure is_alive;
      ShenandoahCMKeepAliveClosure keep_alive(get_queue(serial_worker_id));
      rp->process_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &executor,
                                        &pt);
    }
    pt.print_all_references();

    assert(task_queues()->is_empty(), "Should be empty");
  }

  {
    ShenandoahGCPhase phase(phase_enqueue);
    rp->enqueue_discovered_references(&executor, &pt);
    pt.print_enqueue_phase();
  }
}

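// Yield closure that makes concurrent precleaning bail out once the GC cycle
// has been cancelled.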
class ShenandoahCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}
  virtual bool should_return() { return _heap->cancelled_concgc(); }
};

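// Complete-GC closure for precleaning: runs a single-threaded, non-cancellable
// marking loop that also drains SATB buffers.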
class ShenandoahPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahConcurrentMark* scm = sh->concurrentMark();
    assert(scm->process_references(), "why else would we be here?");
    ReferenceProcessor* rp = sh->ref_processor();
    ParallelTaskTerminator terminator(1, scm->task_queues());
    ReferenceProcessorIsAliveMutator fix_alive(rp, ShenandoahHeap::heap()->is_alive_closure());

    scm->mark_loop(0, &terminator, rp,
                   false, // not cancellable
                   true,  // drain SATBs
                   true,  // count liveness
                   scm->unload_classes(),
                   sh->has_forwarded_objects());
  }
};

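// Keep-alive closure for precleaning when forwarded objects may be present.
// It uses the CONCURRENT update mode; presumably the slot update has to be
// concurrency-safe here, since precleaning runs outside a safepoint while
// mutators are active.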
class ShenandoahPrecleanKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  ShenandoahHeap* _heap;

  template <class T>
  inline void do_oop_nv(T* p) {
    ShenandoahConcurrentMark::mark_through_ref<T, CONCURRENT, false /* string dedup */>(p, _heap, _queue);
  }

public:
  ShenandoahPrecleanKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _heap(ShenandoahHeap::heap()) {}

  void do_oop(narrowOop* p) { do_oop_nv(p); }
  void do_oop(oop* p)       { do_oop_nv(p); }
};

void ShenandoahConcurrentMark::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This filters out the references whose referents
  // are alive. Note that ReferenceProcessor already filters these out during
  // reference discovery, and the bulk of the work is done there. This phase
  // processes the leftovers that missed the initial filtering, i.e. references
  // whose referents were marked alive after the reference was discovered by RP.

  assert(process_references(), "sanity");

  ShenandoahHeap* sh = ShenandoahHeap::heap();
  ReferenceProcessor* rp = sh->ref_processor();

  // Shortcut if no references were discovered, to avoid winding up threads.
  if (!rp->has_discovered_references()) {
    return;
  }

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);
  ReferenceProcessorIsAliveMutator fix_alive(rp, sh->is_alive_closure());

  // Interrupt on cancelled GC.
  ShenandoahCancelledGCYieldClosure yield;

  assert(task_queues()->is_empty(), "Should be empty");

  ShenandoahPrecleanCompleteGCClosure complete_gc;
  if (sh->has_forwarded_objects()) {
    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahPrecleanKeepAliveUpdateClosure keep_alive(get_queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  } else {
    ShenandoahIsAliveClosure is_alive;
    ShenandoahCMKeepAliveClosure keep_alive(get_queue(0));
    ResourceMark rm;
    rp->preclean_discovered_references(&is_alive, &keep_alive,
                                       &complete_gc, &yield,
                                       NULL);
  }

  assert(task_queues()->is_empty(), "Should be empty");
}

void ShenandoahConcurrentMark::cancel() {
  // Clean up marking stacks.
  ShenandoahObjToScanQueueSet* queues = task_queues();
  queues->clear();

  // Cancel SATB buffers.
  JavaThread::satb_mark_queue_set().abandon_partial_marking();
}

ShenandoahObjToScanQueue* ShenandoahConcurrentMark::get_queue(uint worker_id) {
  assert(task_queues()->get_reserved() > worker_id, "No reserved queue for worker id: %u", worker_id);
  return _task_queues->queue(worker_id);
}

void ShenandoahConcurrentMark::clear_queue(ShenandoahObjToScanQueue *q) {
  q->set_empty();
  q->overflow_stack()->clear();
  q->clear_buffer();
}

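// Selects the concrete marking closure and loop specialization at compile time.
// The boolean template parameters are expanded into one of eight closure types
// (class-unloading x update-refs x string-dedup), so that the hot marking loop
// does not branch on these flags per visited oop.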
template <bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS, bool CLASS_UNLOAD, bool UPDATE_REFS, bool STRING_DEDUP>
void ShenandoahConcurrentMark::mark_loop_prework(uint w, ParallelTaskTerminator *t, ReferenceProcessor *rp) {
  ShenandoahObjToScanQueue* q = get_queue(w);

  jushort* ld;
  if (COUNT_LIVENESS) {
    ld = get_liveness(w);
    Copy::fill_to_bytes(ld, _heap->num_regions() * sizeof(jushort));
  } else {
    ld = NULL;
  }

  // TODO: We can clean this up if we figure out how to do templated oop closures that
  // play nice with specialized_oop_iterators.
  if (CLASS_UNLOAD) {
    if (UPDATE_REFS) {
      if (STRING_DEDUP) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataDedupClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    } else {
      if (STRING_DEDUP) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsMetadataDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataDedupClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsMetadataClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsMetadataClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    }
  } else {
    if (UPDATE_REFS) {
      if (STRING_DEDUP) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkUpdateRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsDedupClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkUpdateRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkUpdateRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    } else {
      if (STRING_DEDUP) {
        ShenandoahStrDedupQueue* dq = ShenandoahStringDedup::queue(w);
        ShenandoahMarkRefsDedupClosure cl(q, dq, rp);
        mark_loop_work<ShenandoahMarkRefsDedupClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      } else {
        ShenandoahMarkRefsClosure cl(q, rp);
        mark_loop_work<ShenandoahMarkRefsClosure, CANCELLABLE, DRAIN_SATB, COUNT_LIVENESS>(&cl, ld, w, t);
      }
    }
  }
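  // Flush this worker's local liveness counts into the global region live data.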
  if (COUNT_LIVENESS) {
    for (uint i = 0; i < _heap->regions()->active_regions(); i++) {
      ShenandoahHeapRegion* r = _heap->regions()->get(i);
      jushort live = ld[i];
      if (live > 0) {
        r->increase_live_data_words(live);
      }
    }
  }
}

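// The marking loop proper. Phase one drains any extra queues beyond this
// worker's own (there can be more queues than workers). Phase two repeatedly
// pops from the local queue, drains SATB buffers when enabled, and steals from
// other queues, offering termination once no work can be found. Cancellation
// is checked once per stride of tasks, not on every task, to keep it cheap.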
template <class T, bool CANCELLABLE, bool DRAIN_SATB, bool COUNT_LIVENESS>
void ShenandoahConcurrentMark::mark_loop_work(T* cl, jushort* live_data, uint worker_id, ParallelTaskTerminator *terminator) {
  int seed = 17;
  uintx stride = CANCELLABLE ? ShenandoahMarkLoopStride : 1;

  ShenandoahHeap* heap = ShenandoahHeap::heap();
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q;
  ShenandoahMarkTask t;

  /*
   * Process outstanding queues, if any.
   *
   * There can be more queues than workers. To deal with the imbalance, we claim
   * extra queues first. Since marking can push new tasks into the queue associated
   * with this worker id, we come back to process this queue in the normal loop.
   */
  assert(queues->get_reserved() == heap->workers()->active_workers(),
    "Need to reserve proper number of queues");

  q = queues->claim_next();
  while (q != NULL) {
    if (CANCELLABLE && heap->check_cancelled_concgc_and_yield()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t)) {
        do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }
  q = get_queue(worker_id);

  /*
   * Normal marking loop:
   */
  while (true) {
    if (CANCELLABLE && heap->check_cancelled_concgc_and_yield()) {
      ShenandoahCancelledTerminatorTerminator tt;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (try_queue(q, t) ||
              (DRAIN_SATB && try_draining_satb_buffer(q, t)) ||
              queues->steal(worker_id, &seed, t)) {
        do_task<T, COUNT_LIVENESS>(q, cl, live_data, &t);
      } else {
        // Need to leave the STS here, otherwise it might block safepoints.
        SuspendibleThreadSetLeaver stsl(CANCELLABLE && ShenandoahSuspendibleWorkers);
        if (terminator->offer_termination()) return;
      }
    }
  }
}

1059 
1060 void ShenandoahConcurrentMark::set_process_references(bool pr) {
1061   _process_references.set_cond(pr);
1062 }
1063 
1064 bool ShenandoahConcurrentMark::process_references() const {
1065   return _process_references.is_set();
1066 }
1067 
1068 void ShenandoahConcurrentMark::set_unload_classes(bool uc) {
1069   _unload_classes.set_cond(uc);
1070 }
1071 
1072 bool ShenandoahConcurrentMark::unload_classes() const {
1073   return _unload_classes.is_set();
1074 }
1075 
1076 bool ShenandoahConcurrentMark::claim_codecache() {
1077   assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
1078   return _claimed_codecache.try_set();
1079 }
1080 
1081 void ShenandoahConcurrentMark::clear_claim_codecache() {
1082   assert(ShenandoahConcurrentScanCodeRoots, "must not be called otherwise");
1083   _claimed_codecache.unset();
1084 }
1085 
1086 jushort* ShenandoahConcurrentMark::get_liveness(uint worker_id) {
1087   return _liveness_local[worker_id];
1088 }
1089 
1090 // Generate Shenandoah specialized oop_oop_iterate functions.
1091 SPECIALIZED_OOP_OOP_ITERATE_CLOSURES_SHENANDOAH(ALL_KLASS_OOP_OOP_ITERATE_DEFN)