1 /*
   2  * Copyright (c) 2017, Red Hat, Inc. and/or its affiliates.
   3  *
   4  * This code is free software; you can redistribute it and/or modify it
   5  * under the terms of the GNU General Public License version 2 only, as
   6  * published by the Free Software Foundation.
   7  *
   8  * This code is distributed in the hope that it will be useful, but WITHOUT
   9  * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  10  * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
  11  * version 2 for more details (a copy is included in the LICENSE file that
  12  * accompanied this code).
  13  *
  14  * You should have received a copy of the GNU General Public License version
  15  * 2 along with this work; if not, write to the Free Software Foundation,
  16  * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
  17  *
  18  * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
  19  * or visit www.oracle.com if you need additional information or have any
  20  * questions.
  21  *
  22  */
  23 
  24 #include "precompiled.hpp"
  25 
  26 #include "gc/shared/gcTraceTime.inline.hpp"
  27 #include "gc/shared/workgroup.hpp"
  28 #include "gc/shared/taskqueue.inline.hpp"
  29 #include "gc/shenandoah/shenandoahBarrierSet.hpp"
  30 #include "gc/shenandoah/shenandoahCollectionSet.hpp"
  31 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp"
  32 #include "gc/shenandoah/shenandoahConnectionMatrix.inline.hpp"
  33 #include "gc/shenandoah/shenandoahFreeSet.hpp"
  34 #include "gc/shenandoah/shenandoahPhaseTimings.hpp"
  35 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
  36 #include "gc/shenandoah/shenandoahHeap.hpp"
  37 #include "gc/shenandoah/shenandoahHeap.inline.hpp"
  38 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp"
  39 #include "gc/shenandoah/shenandoahPartialGC.hpp"
  40 #include "gc/shenandoah/shenandoahRootProcessor.hpp"
  41 #include "gc/shenandoah/shenandoahTaskqueue.hpp"
  42 #include "gc/shenandoah/shenandoahUtils.hpp"
  43 #include "gc/shenandoah/shenandoahVerifier.hpp"
  44 #include "gc/shenandoah/shenandoahWorkGroup.hpp"
  45 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
  46 
  47 #include "memory/iterator.hpp"
  48 #include "runtime/safepoint.hpp"
  49 
  50 class ShenandoahPartialEvacuateUpdateRootsClosure : public OopClosure {
  51   ShenandoahPartialGC* _partial_gc;
  52   Thread* _thread;
  53   ShenandoahObjToScanQueue* _queue;
  54 private:
  55   template <class T>
  56   void do_oop_work(T* p) { _partial_gc->process_oop<T, false>(p, _thread, _queue); }
  57 public:
  58   ShenandoahPartialEvacuateUpdateRootsClosure(ShenandoahObjToScanQueue* q) :
  59     _partial_gc(ShenandoahHeap::heap()->partial_gc()),
  60     _thread(Thread::current()), _queue(q) {}
  61   void do_oop(oop* p) {
  62     assert(! ShenandoahHeap::heap()->is_in_reserved(p), "sanity");
  63     do_oop_work(p);
  64   }
  65   void do_oop(narrowOop* p) { do_oop_work(p); }
  66 };
  67 
  68 class ShenandoahPartialSATBBufferClosure : public SATBBufferClosure {
  69 private:
  70   ShenandoahObjToScanQueue* _queue;
  71   ShenandoahPartialGC* _partial_gc;
  72   Thread* _thread;
  73 public:
  74   ShenandoahPartialSATBBufferClosure(ShenandoahObjToScanQueue* q) :
  75     _queue(q), _partial_gc(ShenandoahHeap::heap()->partial_gc()), _thread(Thread::current()) { }
  76 
  77   void do_buffer(void** buffer, size_t size) {
  78     for (size_t i = 0; i < size; ++i) {
  79       oop* p = (oop*) &buffer[i];
  80       oop obj = RawAccess<>::oop_load(p);
  81       _queue->push(obj);
  82     }
  83   }
  84 };
  85 
  86 class ShenandoahPartialSATBThreadsClosure : public ThreadClosure {
  87   ShenandoahPartialSATBBufferClosure* _satb_cl;
  88   int _thread_parity;
  89 
  90  public:
  91   ShenandoahPartialSATBThreadsClosure(ShenandoahPartialSATBBufferClosure* satb_cl) :
  92     _satb_cl(satb_cl),
  93     _thread_parity(Threads::thread_claim_parity()) {}
  94 
  95   void do_thread(Thread* thread) {
  96     if (thread->is_Java_thread()) {
  97       if (thread->claim_oops_do(true, _thread_parity)) {
  98         JavaThread* jt = (JavaThread*)thread;
  99         ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl);
 100       }
 101     } else if (thread->is_VM_thread()) {
 102       if (thread->claim_oops_do(true, _thread_parity)) {
 103         ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl);
 104       }
 105     }
 106   }
 107 };
 108 
 109 class ShenandoahInitPartialCollectionTask : public AbstractGangTask {
 110 private:
 111   ShenandoahRootProcessor* _rp;
 112   ShenandoahHeap* _heap;
 113 public:
 114   ShenandoahInitPartialCollectionTask(ShenandoahRootProcessor* rp) :
 115     AbstractGangTask("Shenandoah Init Partial Collection"),
 116     _rp(rp),
 117     _heap(ShenandoahHeap::heap()) {}
 118 
 119   void work(uint worker_id) {
 120     ShenandoahObjToScanQueueSet* queues = _heap->partial_gc()->task_queues();
 121     ShenandoahObjToScanQueue* q = queues->queue(worker_id);
 122 
 123     ShenandoahEvacOOMScope oom_evac_scope;
 124 
 125     // Step 1: Process ordinary GC roots.
 126     {
 127       ShenandoahPartialEvacuateUpdateRootsClosure roots_cl(q);
 128       CLDToOopClosure cld_cl(&roots_cl);
 129       MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
 130       _rp->process_all_roots(&roots_cl, &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
 131     }
 132   }
 133 };
 134 
// Gang task for the concurrent phase of partial GC: scans all root regions,
// then drains the work queues. Checks for cancellation between regions so
// a cancelled GC terminates promptly and in a coordinated fashion.
class ShenandoahConcurrentPartialCollectionTask : public AbstractGangTask {
private:
  ParallelTaskTerminator* _terminator;
  ShenandoahHeapRegionSet* _root_regions;
  ShenandoahHeap* _heap;
public:
  ShenandoahConcurrentPartialCollectionTask(ParallelTaskTerminator* terminator,
                                            ShenandoahHeapRegionSet* root_regions) :
    AbstractGangTask("Shenandoah Concurrent Partial Collection"),
    _terminator(terminator), _root_regions(root_regions),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahPartialGC* partial_gc = _heap->partial_gc();
    ShenandoahObjToScanQueueSet* queues = partial_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    // Bail out (after coordinated termination) if GC was already cancelled.
    if (partial_gc->check_and_handle_cancelled_gc(_terminator)) return;

    ShenandoahPartialEvacuateUpdateHeapClosure cl(q);

    // Step 2: Process all root regions.
    {
      ShenandoahHeapRegionSetIterator iter = _root_regions->iterator();
      ShenandoahHeapRegion* r = iter.next();
      while (r != NULL) {
        assert(r->is_root(), "must be root region");
        // Scan only up to the concurrent-iteration-safe limit recorded in
        // prepare(); objects evacuated into this region during the scan
        // must not be visited here.
        _heap->marked_object_oop_safe_iterate(r, &cl);
        if (ShenandoahPacing) {
          // Report scan progress to the pacer in live words.
          _heap->pacer()->report_partial(r->get_live_data_words());
        }
        // Re-check cancellation between regions to stay responsive.
        if (partial_gc->check_and_handle_cancelled_gc(_terminator)) return;
        r = iter.next();
      }
    }
    if (partial_gc->check_and_handle_cancelled_gc(_terminator)) return;

    // Step 3: Drain all outstanding work in queues.
    partial_gc->main_loop<true>(worker_id, _terminator);
  }
};
 177 
// Gang task for the final (STW) phase of partial GC: drains all remaining
// SATB buffers into the worker queues, then finishes outstanding queue work.
class ShenandoahFinalPartialCollectionTask : public AbstractGangTask {
private:
  ParallelTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalPartialCollectionTask(ParallelTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Partial Collection"),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;

    ShenandoahPartialGC* partial_gc = _heap->partial_gc();

    ShenandoahObjToScanQueueSet* queues = partial_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    // Drain outstanding SATB queues.
    {
      ShenandoahPartialSATBBufferClosure satb_cl(q);
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers.
      ShenandoahPartialSATBThreadsClosure tc(&satb_cl);
      Threads::threads_do(&tc);
    }

    // Finally drain all outstanding work in queues. DO_SATB=false here:
    // SATB buffers were fully drained above.
    partial_gc->main_loop<false>(worker_id, _terminator);

  }
};
 212 
 213 class ShenandoahPartialCollectionCleanupTask : public AbstractGangTask {
 214 private:
 215   ShenandoahHeap* _heap;
 216 public:
 217   ShenandoahPartialCollectionCleanupTask() :
 218           AbstractGangTask("Shenandoah Partial Collection Cleanup"),
 219           _heap(ShenandoahHeap::heap()) {
 220     _heap->collection_set()->clear_current_index();
 221   }
 222 
 223   void work(uint worker_id) {
 224     ShenandoahCollectionSet* cset = _heap->collection_set();
 225     ShenandoahHeapRegion* r = cset->claim_next();
 226     while (r != NULL) {
 227       HeapWord* bottom = r->bottom();
 228       HeapWord* top = _heap->complete_top_at_mark_start(r->bottom());
 229       if (top > bottom) {
 230         _heap->complete_mark_bit_map()->clear_range_large(MemRegion(bottom, top));
 231       }
 232       r = cset->claim_next();
 233     }
 234   }
 235 
 236 };
 237 
 238 ShenandoahPartialGC::ShenandoahPartialGC(ShenandoahHeap* heap, size_t num_regions) :
 239   _heap(heap),
 240   _matrix(heap->connection_matrix()),
 241   _root_regions(new ShenandoahHeapRegionSet()),
 242   _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())) {
 243 
 244   assert(_matrix != NULL, "need matrix");
 245 
 246   uint num_queues = heap->max_workers();
 247   for (uint i = 0; i < num_queues; ++i) {
 248     ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
 249     task_queue->initialize();
 250     _task_queues->register_queue(i, task_queue);
 251   }
 252 
 253   from_idxs = NEW_C_HEAP_ARRAY(size_t, ShenandoahPartialInboundThreshold, mtGC);
 254   set_has_work(false);
 255 }
 256 
// Frees the from_idxs scratch array allocated in the constructor.
ShenandoahPartialGC::~ShenandoahPartialGC() {
  FREE_C_HEAP_ARRAY(size_t, from_idxs);
}
 260 
 261 bool ShenandoahPartialGC::prepare() {
 262   _heap->collection_set()->clear();
 263   assert(_heap->collection_set()->count() == 0, "collection set not clear");
 264 
 265   _heap->make_tlabs_parsable(true);
 266 
 267   ShenandoahConnectionMatrix* matrix = _heap->connection_matrix();
 268 
 269   if (UseShenandoahMatrix && PrintShenandoahMatrix) {
 270     LogTarget(Info, gc) lt;
 271     LogStream ls(lt);
 272     _heap->connection_matrix()->print_on(&ls);
 273   }
 274 
 275   ShenandoahCollectionSet* collection_set = _heap->collection_set();
 276   size_t num_regions = _heap->num_regions();
 277 
 278   // First pass: reset all roots
 279   for (uint to_idx = 0; to_idx < num_regions; to_idx++) {
 280     ShenandoahHeapRegion* r = _heap->get_region(to_idx);
 281     r->set_root(false);
 282   }
 283 
 284   // Second pass: find collection set, and mark root candidates
 285   _heap->shenandoahPolicy()->choose_collection_set(collection_set, true);
 286 
 287   // Shortcut: no cset, bail
 288   size_t num_cset = collection_set->count();
 289 
 290   if (num_cset == 0) {
 291     log_info(gc, ergo)("No regions with fewer inbound connections than threshold (" UINTX_FORMAT ")",
 292                        ShenandoahPartialInboundThreshold);
 293     return false;
 294   }
 295 
 296   // Final pass: rebuild free set and region set.
 297   ShenandoahFreeSet* free_set = _heap->free_set();
 298   _root_regions->clear();
 299   free_set->clear();
 300 
 301   assert(_root_regions->count() == 0, "must be cleared");
 302 
 303   size_t work_size = 0;
 304 
 305   for (uint from_idx = 0; from_idx < num_regions; from_idx++) {
 306     ShenandoahHeapRegion* r = _heap->get_region(from_idx);
 307 
 308     // Never assume anything implicitely marked.
 309     _heap->set_next_top_at_mark_start(r->bottom(), r->end());
 310 
 311     if (r->is_root() && !r->in_collection_set()) {
 312       _root_regions->add_region(r);
 313       work_size += r->get_live_data_words();
 314 
 315       matrix->clear_region_outbound(from_idx);
 316 
 317       // Since root region can be allocated at, we should bound the scans
 318       // in it at current top. Otherwise, one thread may evacuate objects
 319       // to that root region, while another would try to scan newly evac'ed
 320       // objects under the race.
 321       r->set_concurrent_iteration_safe_limit(r->top());
 322     }
 323   }
 324 
 325   free_set->rebuild();
 326 
 327   if (ShenandoahPacing) {
 328     work_size += collection_set->live_data() >> LogHeapWordSize;
 329     _heap->pacer()->setup_for_partial(work_size);
 330   }
 331 
 332   log_info(gc,ergo)("Got "SIZE_FORMAT" collection set regions, "SIZE_FORMAT" root regions",
 333                      collection_set->count(), _root_regions->count());
 334 
 335   return true;
 336 }
 337 
// Safepoint operation that starts the partial cycle: runs prepare() to pick
// the collection set, and if there is work, evacuates/updates all GC roots
// with the worker gang. On cancellation, restores root consistency and
// unwinds the cycle.
void ShenandoahPartialGC::init_partial_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW partial GC");

  assert(_heap->is_next_bitmap_clear(), "need clear marking bitmap");
  _heap->set_alloc_seq_gc_start();

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_partial();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::partial_gc_prepare);
    // prepare() mutates region/free sets, so it needs the heap lock.
    ShenandoahHeapLocker lock(_heap->lock());
    bool has_work = prepare();
    set_has_work(has_work);
  }

  // Nothing qualified for collection: drop state and skip the cycle.
  if (!has_work()) {
    reset();
    return;
  }

  _heap->set_concurrent_partial_in_progress(true);

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_partial_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before partial GC");

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_partial_gc_work);

      // NOTE(review): both terminators below are constructed but never handed
      // to the task; the two branches run identical tasks. Presumably kept
      // for symmetry with the other phases — confirm before simplifying.
      if (UseShenandoahOWST) {
        ShenandoahTaskTerminator terminator(nworkers, task_queues());
        ShenandoahInitPartialCollectionTask partial_task(&rp);
        _heap->workers()->run_task(&partial_task);
      } else {
        ParallelTaskTerminator terminator(nworkers, task_queues());
        ShenandoahInitPartialCollectionTask partial_task(&rp);
        _heap->workers()->run_task(&partial_task);
      }
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
    // Root processing was cancelled mid-way: fix up roots and unwind.
    if (_heap->cancelled_concgc()) {
      _heap->fixup_roots();
      reset();
      _heap->set_concurrent_partial_in_progress(false);
    }
  }
}
 395 
// Worker work loop for partial GC: drains scan queues, optionally
// interleaving SATB buffer processing (DO_SATB), stealing from other
// queues when the local one runs dry, until all workers terminate.
// Callers establish the surrounding ShenandoahEvacOOMScope.
template <bool DO_SATB>
void ShenandoahPartialGC::main_loop(uint worker_id, ParallelTaskTerminator* terminator) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);

  // Check cancellation only once per 'stride' tasks to limit overhead.
  uintx stride = ShenandoahMarkLoopStride;
  ShenandoahPartialEvacuateUpdateHeapClosure cl(q);
  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_concgc_and_yield()) {
      // Cancelled: leave the evac-OOM scope and spin in termination so all
      // workers exit together.
      ShenandoahCancelledTerminatorTerminator tt;
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      while (!terminator->offer_termination(&tt));
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop_buffer(task) ||
          q->pop_local(task) ||
          q->pop_overflow(task)) {
        oop obj = task.obj();
        assert(!CompressedOops::is_null(obj), "must not be null");
        obj->oop_iterate(&cl);
      } else {
        assert(q->is_empty(), "Must be empty");
        // This queue is drained; claim the next outstanding one.
        q = queues->claim_next();
        break;
      }
    }
  }

  // Normal loop.
  q = queues->queue(worker_id);
  ShenandoahPartialSATBBufferClosure satb_cl(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  int seed = 17;

  while (true) {
    if (check_and_handle_cancelled_gc(terminator)) return;

    for (uint i = 0; i < stride; i++) {
      // Work source order: own queue first, then (if DO_SATB) a completed
      // SATB buffer drained into the own queue, then stealing from others.
      if ((q->pop_buffer(task) ||
           q->pop_local(task) ||
           q->pop_overflow(task) ||
           (DO_SATB && satb_mq_set.apply_closure_to_completed_buffer(&satb_cl) && q->pop_buffer(task)) ||
           queues->steal(worker_id, &seed, task))) {
        oop obj = task.obj();
        assert(!CompressedOops::is_null(obj), "must not be null");
        obj->oop_iterate(&cl);
      } else {
        // No work anywhere: offer termination outside the evac-OOM scope.
        ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
        if (terminator->offer_termination()) return;
      }
    }
  }
}
 456 
 457 bool ShenandoahPartialGC::check_and_handle_cancelled_gc(ParallelTaskTerminator* terminator) {
 458   if (_heap->cancelled_concgc()) {
 459     ShenandoahCancelledTerminatorTerminator tt;
 460     ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
 461     while (! terminator->offer_termination(&tt));
 462     return true;
 463   }
 464   return false;
 465 }
 466 
// Runs the concurrent phase with the worker gang, unless the GC was already
// cancelled. On cancellation, discards leftover queue entries so the queues
// end the phase empty.
void ShenandoahPartialGC::concurrent_partial_collection() {
  assert(has_work(), "Performance: should only be here when there is work");

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_partial);
  if (!_heap->cancelled_concgc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    // Two terminator flavors, chosen by flag; the task itself is the same.
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentPartialCollectionTask partial_task(&terminator, _root_regions);
      _heap->workers()->run_task(&partial_task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahConcurrentPartialCollectionTask partial_task(&terminator, _root_regions);
      _heap->workers()->run_task(&partial_task);
    }
  }

  // Cancellation can leave unfinished tasks behind; drop them.
  if (_heap->cancelled_concgc()) {
    _task_queues->clear();
  }
  assert(_task_queues->is_empty(), "queues must be empty after partial GC");
}
 490 
// Final (STW) phase of the partial cycle: finishes SATB/queue work, updates
// roots, then clears bitmaps and trashes the collection set — or, on
// cancellation, fixes up roots instead. Always ends the cycle.
void ShenandoahPartialGC::final_partial_collection() {
  assert(has_work(), "Performance: should only be here when there is work");

  if (!_heap->cancelled_concgc()) {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_partial_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    StrongRootsScope scope(nworkers);
    if (UseShenandoahOWST) {
      ShenandoahTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalPartialCollectionTask partial_task(&terminator);
      _heap->workers()->run_task(&partial_task);
    } else {
      ParallelTaskTerminator terminator(nworkers, task_queues());
      ShenandoahFinalPartialCollectionTask partial_task(&terminator);
      _heap->workers()->run_task(&partial_task);
    }
  }

  if (!_heap->cancelled_concgc()) {
    // Still good? Update the roots then
    _heap->concurrentMark()->update_roots(ShenandoahPhaseTimings::final_partial_gc_work);
  }

  if (!_heap->cancelled_concgc()) {
    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::partial_gc_cleanup);
      ShenandoahCollectionSet* cset = _heap->collection_set();
      ShenandoahHeapLocker lock(_heap->lock());

      // Clear cset mark bitmaps in parallel before trashing the regions.
      ShenandoahPartialCollectionCleanupTask cleanup;
      _heap->workers()->run_task(&cleanup);

      // Trash everything when bitmaps are cleared.
      cset->clear_current_index();
      ShenandoahHeapRegion* r;
      while((r = cset->next()) != NULL) {
        r->make_trash();
      }
      cset->clear();

      reset();
    }

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_partial();
    }
  } else {
    // On cancellation path, fixup roots to make them consistent
    _heap->fixup_roots();
    reset();
  }

  assert(_task_queues->is_empty(), "queues must be empty after partial GC");
  _heap->set_concurrent_partial_in_progress(false);
}
 549 
 550 void ShenandoahPartialGC::reset() {
 551   _task_queues->clear();
 552 
 553   ShenandoahHeapRegionSetIterator root_iter = _root_regions->iterator();
 554   ShenandoahHeapRegion* r;
 555   while((r = root_iter.next()) != NULL) {
 556     r->set_root(false);
 557   }
 558   _root_regions->clear();
 559 
 560   set_has_work(false);
 561 }
 562 
// Records whether prepare() selected any partial-GC work for this cycle.
void ShenandoahPartialGC::set_has_work(bool value) {
  _has_work.set_cond(value);
}
 566 
// Returns true if prepare() selected any work for the current cycle.
bool ShenandoahPartialGC::has_work() {
  return _has_work.is_set();
}
 570 
// Accessor for the per-worker object-to-scan queue set.
ShenandoahObjToScanQueueSet* ShenandoahPartialGC::task_queues() {
  return _task_queues;
}