1 /* 2 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
21 * 22 */ 23 24 #include "precompiled.hpp" 25 26 #include "classfile/classLoaderData.hpp" 27 #include "classfile/classLoaderDataGraph.hpp" 28 #include "gc/shared/referenceProcessor.hpp" 29 #include "gc/shared/referenceProcessorPhaseTimes.hpp" 30 #include "gc/shared/workgroup.hpp" 31 #include "gc/shared/weakProcessor.inline.hpp" 32 #include "gc/shenandoah/shenandoahBarrierSet.hpp" 33 #include "gc/shenandoah/shenandoahCodeRoots.hpp" 34 #include "gc/shenandoah/shenandoahCollectionSet.hpp" 35 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 36 #include "gc/shenandoah/shenandoahFreeSet.hpp" 37 #include "gc/shenandoah/shenandoahPhaseTimings.hpp" 38 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 39 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" 40 #include "gc/shenandoah/shenandoahHeuristics.hpp" 41 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" 42 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" 43 #include "gc/shenandoah/shenandoahRootProcessor.hpp" 44 #include "gc/shenandoah/shenandoahStringDedup.hpp" 45 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" 46 #include "gc/shenandoah/shenandoahTimingTracker.hpp" 47 #include "gc/shenandoah/shenandoahTraversalGC.hpp" 48 #include "gc/shenandoah/shenandoahUtils.hpp" 49 #include "gc/shenandoah/shenandoahVerifier.hpp" 50 51 #include "memory/iterator.hpp" 52 #include "memory/metaspace.hpp" 53 #include "memory/resourceArea.hpp" 54 55 /** 56 * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm. 57 * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm 58 * is incremental-update-based. 
 *
 * NOTE on interaction with TAMS: we want to avoid traversing new objects for
 * several reasons:
 * - We will not reclaim them in this cycle anyway, because they are not in the
 *   cset
 * - It makes up for the bulk of work during final-pause
 * - It also shortens the concurrent cycle because we don't need to
 *   pointlessly traverse through newly allocated objects.
 * - As a nice side-effect, it solves the I-U termination problem (mutators
 *   cannot outrun the GC by allocating like crazy)
 * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
 *   achieves the same, but without extra barriers. I think the effect of
 *   shortened final-pause (mentioned above) is the main advantage of MWF. In
 *   particular, we will not see the head of a completely new long linked list
 *   in final-pause and end up traversing huge chunks of the heap there.
 * - We don't need to see/update the fields of new objects either, because they
 *   are either still null, or anything that's been stored into them has been
 *   evacuated+enqueued before (and will thus be treated later).
 *
 * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
 *
 * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
 *   them alive. Otherwise the next cycle wouldn't pick them up and consider
 *   them for cset. This means that we need to protect such regions from
 *   getting accidentally trashed at the end of traversal cycle. This is why I
 *   keep track of alloc-regions and check is_alloc_region() in the trashing
 *   code.
 * - We *need* to traverse through evacuated objects. Those objects are
 *   pre-existing, and any references in them point to interesting objects that
 *   we need to see.
We also want to count them as live, because we just 92 * determined that they are alive :-) I achieve this by upping TAMS 93 * concurrently for every gclab/gc-shared alloc before publishing the 94 * evacuated object. This way, the GC threads will not consider such objects 95 * implictely marked, and traverse through them as normal. 96 */ 97 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure { 98 private: 99 ShenandoahObjToScanQueue* _queue; 100 ShenandoahTraversalGC* _traversal_gc; 101 ShenandoahHeap* const _heap; 102 103 public: 104 ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) : 105 _queue(q), 106 _heap(ShenandoahHeap::heap()) 107 { } 108 109 void do_buffer(void** buffer, size_t size) { 110 for (size_t i = 0; i < size; ++i) { 111 oop* p = (oop*) &buffer[i]; 112 oop obj = RawAccess<>::oop_load(p); 113 shenandoah_assert_not_forwarded(p, obj); 114 if (_heap->marking_context()->mark(obj)) { 115 _queue->push(ShenandoahMarkTask(obj)); 116 } 117 } 118 } 119 }; 120 121 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure { 122 private: 123 ShenandoahTraversalSATBBufferClosure* _satb_cl; 124 125 public: 126 ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) : 127 _satb_cl(satb_cl) {} 128 129 void do_thread(Thread* thread) { 130 ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl); 131 } 132 }; 133 134 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal 135 // and remark them later during final-traversal. 
class ShenandoahMarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;
public:
  ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    // Second 'true' argument clears the modified-oops flag, so only CLDs
    // modified after this visit are seen by the remark closure below.
    cld->oops_do(_cl, true, true);
  }
};

// Like CLDToOopClosure, but only process modified CLDs
class ShenandoahRemarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;
public:
  ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    // Only revisit CLDs whose oops were modified since the initial CLD scan
    // (the flag was cleared by ShenandoahMarkCLDClosure during init-traversal).
    if (cld->has_modified_oops()) {
      cld->oops_do(_cl, true, true);
    }
  }
};

// Parallel worker task for the init-traversal pause: scans GC roots with the
// traversal closure, seeding the per-worker traversal queues.
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahHeap* _heap;
  ShenandoahCsetCodeRootsIterator* _cset_coderoots;  // code roots pointing into the collection set
public:
  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()),
    _cset_coderoots(cset_coderoots) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    // Traversal evacuates as it marks, so workers must be inside the evac-OOM protocol.
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      if (unload_classes) {
        // Strong roots only; weak roots/code cache are handled by class unloading.
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, NULL, NULL, worker_id);
        // Need to pre-evac code roots here. Otherwise we might see from-space constants.
        ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
        ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        _cset_coderoots->possibly_parallel_blobs_do(&code_cl);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &code_cl, NULL, worker_id);
      }
    }
  }
};

// Concurrent-phase worker task: drains the traversal queues (marking and
// evacuating as it goes) until termination or cancellation.
class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    // Join the suspendible thread set so safepoints can pause concurrent workers.
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    // Drain all outstanding work in queues.
    traversal_gc->main_loop(worker_id, _terminator, true /* sts_yield */);
  }
};

// Final-traversal pause worker task: drains remaining SATB buffers, rescans
// roots (with degenerated variants if in degen mode), and finishes the queues.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        // Weak CLDs: only those modified since init-traversal need remarking.
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    } else {
      // Degenerated mode: same structure, but with the degen closure variants.
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure weak_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, &weak_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, process_refs ? NULL : &roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 3: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false /* no sts_yield at pause */);
    }

  }
};

// Sets up one traversal queue per potential worker.
ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
  _heap(heap),
  _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
  _traversal_set(ShenandoahHeapRegionSet()) {

  uint num_queues = heap->max_workers();
  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}

// Sets up TAMS and live data for each region before the traversal cycle:
// traversal-set regions get TAMS captured (objects above TAMS count as
// implicitly marked), everything else is treated as wholly live.
void ShenandoahTraversalGC::prepare_regions() {
  size_t num_regions = _heap->num_regions();
  ShenandoahMarkingContext* const ctx = _heap->marking_context();
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = _heap->get_region(i);
    if (_heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        ctx->reset_top_at_mark_start(region);
      }
    } else {
      // FreeSet may contain uncommitted empty regions, once they are recommitted,
      // their TAMS may have old values, so reset them here.
      ctx->reset_top_at_mark_start(region);
    }
  }
}

// Pause-time preparation: makes the heap parsable, chooses the collection set,
// sets up region TAMS, and rebuilds the free set. Caller holds the heap lock.
void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
}

// STW init-traversal pause: verifies, prepares regions/cset, enables reference
// discovery, and runs the parallel initial root scan.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator();

      ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}

// Dispatches to the right main_loop_work instantiation for the current mode:
// degenerated-or-not x class-unloading-or-not x string-dedup-or-not.
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

// Core traversal loop: first drains leftover claimed queues, then alternates
// SATB buffer draining, queue pop/steal work, and termination offers.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Process work in strides of this many tasks between cancellation checks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Drain SATB buffers completed by mutators since the last iteration.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      // Leave the evac-OOM scope and the suspendible set while parked in the
      // terminator, so we neither block OOM handling nor safepoints.
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}

// Returns true if GC was cancelled; terminator/sts_yield are currently unused
// here (cancellation handling happens at the call sites).
bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
  if (_heap->cancelled_gc()) {
    return true;
  }
  return false;
}

// Concurrent phase: runs the traversal worker tasks, then optionally
// precleans weak references.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ClassLoaderDataGraph::clear_claimed_marks();

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}

// STW final-traversal pause: finishes traversal work, processes weak refs,
// unloads classes, trashes fully-garbage regions, and verifies.
void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc() && _heap->unload_classes()) {
    _heap->unload_classes_and_cleanup_tables(false);
    fixup_roots();
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // Regions with allocations past TAMS hold implicitly-live new objects
        // and must not be trashed (see file header: protect alloc-regions).
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + ShenandoahBrooksPointer::word_size();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Trash all continuation regions of this humongous object as well.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

// Updates a root slot to point to the forwardee, if the referenced object
// was evacuated.
class ShenandoahTraversalFixRootsClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (!oopDesc::equals_raw(obj, forw)) {
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  inline void do_oop(oop* p) { do_oop_work(p); }
  inline void do_oop(narrowOop* p) { do_oop_work(p); }
};

// Parallel task applying the fix-roots closure over all roots.
class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;

public:
  ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah traversal fix roots"),
    _rp(rp) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahTraversalFixRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
    CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
    _rp->process_all_roots(&cl, &cl, &cldCl, &blobsCl, NULL, worker_id);
  }
};

// Rewrites all root slots to forwardees after class unloading may have left
// stale references behind.
void ShenandoahTraversalGC::fixup_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

// Clears the traversal task queues, e.g. after cancellation or cycle end.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}

ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}

// Yield closure for reference precleaning: abort when GC is cancelled.
class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

// Complete-GC closure for reference precleaning: drains the traversal queues
// single-threaded (worker 0).
class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
    shenandoah_assert_rp_isalive_installed();
    traversal_gc->main_loop((uint) 0, &terminator, true);
  }
};

// Keep-alive closure for reference processing: marks/evacuates/enqueues the
// referenced object via the traversal machinery (non-degen variant).
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Weak-reference update closure: unconditionally stores the forwardee.
class ShenandoahTraversalWeakUpdateClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
    // Cannot call maybe_update_with_forwarded, because on traversal-degen
    // path the collection set is already dropped. Instead, do the unguarded store.
    // TODO: This can be fixed after degen-traversal stops dropping cset.
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      shenandoah_assert_marked(p, obj);
      RawAccess<IS_NOT_NULL>::oop_store(p, obj);
    }
  }

public:
  ShenandoahTraversalWeakUpdateClosure() {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Keep-alive closure, degenerated-GC variant.
class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Keep-alive closure for single-threaded use: enters the evac-OOM scope
// per oop, since the caller is not inside one.
class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahEvacOOMScope evac_scope;
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Single-threaded keep-alive closure, degenerated-GC variant.
class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahEvacOOMScope evac_scope;
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Single-worker task that precleans discovered references concurrently.
class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
    AbstractGangTask("Precleaning task"),
    _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahEvacOOMScope oom_evac_scope;

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);

    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahTraversalCancelledGCYieldClosure yield;
    ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};

void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
916 ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false); 917 918 WorkGang* workers = _heap->workers(); 919 uint nworkers = workers->active_workers(); 920 assert(nworkers == 1, "This code uses only a single worker"); 921 task_queues()->reserve(nworkers); 922 923 ShenandoahTraversalPrecleanTask task(rp); 924 workers->run_task(&task); 925 926 assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty"); 927 } 928 929 // Weak Reference Closures 930 class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure { 931 uint _worker_id; 932 ShenandoahTaskTerminator* _terminator; 933 bool _reset_terminator; 934 935 public: 936 ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false): 937 _worker_id(worker_id), 938 _terminator(t), 939 _reset_terminator(reset_terminator) { 940 } 941 942 void do_void() { 943 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); 944 945 ShenandoahHeap* sh = ShenandoahHeap::heap(); 946 ShenandoahTraversalGC* traversal_gc = sh->traversal_gc(); 947 assert(sh->process_references(), "why else would we be here?"); 948 shenandoah_assert_rp_isalive_installed(); 949 950 traversal_gc->main_loop(_worker_id, _terminator, false); 951 952 if (_reset_terminator) { 953 _terminator->reset_for_reuse(); 954 } 955 } 956 }; 957 958 class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure { 959 uint _worker_id; 960 ShenandoahTaskTerminator* _terminator; 961 bool _reset_terminator; 962 963 public: 964 ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false): 965 _worker_id(worker_id), 966 _terminator(t), 967 _reset_terminator(reset_terminator) { 968 } 969 970 void do_void() { 971 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); 972 973 ShenandoahHeap* sh = ShenandoahHeap::heap(); 974 
ShenandoahTraversalGC* traversal_gc = sh->traversal_gc(); 975 assert(sh->process_references(), "why else would we be here?"); 976 shenandoah_assert_rp_isalive_installed(); 977 978 ShenandoahEvacOOMScope evac_scope; 979 traversal_gc->main_loop(_worker_id, _terminator, false); 980 981 if (_reset_terminator) { 982 _terminator->reset_for_reuse(); 983 } 984 } 985 }; 986 987 void ShenandoahTraversalGC::weak_refs_work() { 988 assert(_heap->process_references(), "sanity"); 989 990 ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs; 991 992 ShenandoahGCPhase phase(phase_root); 993 994 ReferenceProcessor* rp = _heap->ref_processor(); 995 996 // NOTE: We cannot shortcut on has_discovered_references() here, because 997 // we will miss marking JNI Weak refs then, see implementation in 998 // ReferenceProcessor::process_discovered_references. 999 weak_refs_work_doit(); 1000 1001 rp->verify_no_references_recorded(); 1002 assert(!rp->discovery_enabled(), "Post condition"); 1003 1004 } 1005 1006 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask { 1007 private: 1008 AbstractRefProcTaskExecutor::ProcessTask& _proc_task; 1009 ShenandoahTaskTerminator* _terminator; 1010 1011 public: 1012 ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task, 1013 ShenandoahTaskTerminator* t) : 1014 AbstractGangTask("Process reference objects in parallel"), 1015 _proc_task(proc_task), 1016 _terminator(t) { 1017 } 1018 1019 void work(uint worker_id) { 1020 ShenandoahEvacOOMScope oom_evac_scope; 1021 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); 1022 ShenandoahHeap* heap = ShenandoahHeap::heap(); 1023 ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator); 1024 1025 ShenandoahForwardedIsAliveClosure is_alive; 1026 if (!heap->is_degenerated_gc_in_progress()) { 1027 ShenandoahTraversalKeepAliveUpdateClosure 
keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); 1028 _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); 1029 } else { 1030 ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); 1031 _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); 1032 } 1033 } 1034 }; 1035 1036 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor { 1037 private: 1038 WorkGang* _workers; 1039 1040 public: 1041 ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {} 1042 1043 // Executes a task using worker threads. 1044 void execute(ProcessTask& task, uint ergo_workers) { 1045 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); 1046 1047 ShenandoahHeap* heap = ShenandoahHeap::heap(); 1048 ShenandoahTraversalGC* traversal_gc = heap->traversal_gc(); 1049 ShenandoahPushWorkerQueuesScope scope(_workers, 1050 traversal_gc->task_queues(), 1051 ergo_workers, 1052 /* do_check = */ false); 1053 uint nworkers = _workers->active_workers(); 1054 traversal_gc->task_queues()->reserve(nworkers); 1055 ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues()); 1056 ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator); 1057 _workers->run_task(&proc_task_proxy); 1058 } 1059 }; 1060 1061 void ShenandoahTraversalGC::weak_refs_work_doit() { 1062 ReferenceProcessor* rp = _heap->ref_processor(); 1063 1064 ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process; 1065 1066 shenandoah_assert_rp_isalive_not_installed(); 1067 ShenandoahForwardedIsAliveClosure is_alive; 1068 ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive); 1069 1070 WorkGang* workers = _heap->workers(); 1071 uint nworkers = workers->active_workers(); 1072 1073 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs()); 1074 rp->set_active_mt_degree(nworkers); 1075 1076 
assert(task_queues()->is_empty(), "Should be empty"); 1077 1078 // complete_gc and keep_alive closures instantiated here are only needed for 1079 // single-threaded path in RP. They share the queue 0 for tracking work, which 1080 // simplifies implementation. Since RP may decide to call complete_gc several 1081 // times, we need to be able to reuse the terminator. 1082 uint serial_worker_id = 0; 1083 ShenandoahTaskTerminator terminator(1, task_queues()); 1084 ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true); 1085 ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false); 1086 1087 ShenandoahTraversalRefProcTaskExecutor executor(workers); 1088 1089 ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues()); 1090 if (!_heap->is_degenerated_gc_in_progress()) { 1091 ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id)); 1092 rp->process_discovered_references(&is_alive, &keep_alive, 1093 &complete_gc, &executor, 1094 &pt); 1095 } else { 1096 ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id)); 1097 rp->process_discovered_references(&is_alive, &keep_alive, 1098 &complete_gc, &executor, 1099 &pt); 1100 } 1101 1102 { 1103 ShenandoahGCPhase phase(phase_process); 1104 ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination); 1105 1106 // Process leftover weak oops (using parallel version) 1107 ShenandoahTraversalWeakUpdateClosure cl; 1108 WeakProcessor::weak_oops_do(workers, &is_alive, &cl, 1); 1109 1110 pt.print_all_references(); 1111 1112 assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty"); 1113 } 1114 }