/*
 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
21 * 22 */ 23 24 #include "precompiled.hpp" 25 26 #include "gc/shared/referenceProcessor.hpp" 27 #include "gc/shared/referenceProcessorPhaseTimes.hpp" 28 #include "gc/shared/workgroup.hpp" 29 #include "gc/shared/weakProcessor.hpp" 30 #include "gc/shenandoah/shenandoahBarrierSet.hpp" 31 #include "gc/shenandoah/shenandoahClosures.inline.hpp" 32 #include "gc/shenandoah/shenandoahCodeRoots.hpp" 33 #include "gc/shenandoah/shenandoahCollectionSet.hpp" 34 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 35 #include "gc/shenandoah/shenandoahFreeSet.hpp" 36 #include "gc/shenandoah/shenandoahPhaseTimings.hpp" 37 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 38 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" 39 #include "gc/shenandoah/shenandoahHeuristics.hpp" 40 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" 41 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" 42 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" 43 #include "gc/shenandoah/shenandoahStringDedup.hpp" 44 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" 45 #include "gc/shenandoah/shenandoahTimingTracker.hpp" 46 #include "gc/shenandoah/shenandoahTraversalGC.hpp" 47 #include "gc/shenandoah/shenandoahUtils.hpp" 48 #include "gc/shenandoah/shenandoahVerifier.hpp" 49 50 #include "memory/iterator.hpp" 51 #include "memory/metaspace.hpp" 52 #include "memory/resourceArea.hpp" 53 54 /** 55 * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm. 56 * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm 57 * is incremental-update-based. 
58 * 59 * NOTE on interaction with TAMS: we want to avoid traversing new objects for 60 * several reasons: 61 * - We will not reclaim them in this cycle anyway, because they are not in the 62 * cset 63 * - It makes up for the bulk of work during final-pause 64 * - It also shortens the concurrent cycle because we don't need to 65 * pointlessly traverse through newly allocated objects. 66 * - As a nice side-effect, it solves the I-U termination problem (mutators 67 * cannot outrun the GC by allocating like crazy) 68 * - It is an easy way to achieve MWF. What MWF does is to also enqueue the 69 * target object of stores if it's new. Treating new objects live implicitely 70 * achieves the same, but without extra barriers. I think the effect of 71 * shortened final-pause (mentioned above) is the main advantage of MWF. In 72 * particular, we will not see the head of a completely new long linked list 73 * in final-pause and end up traversing huge chunks of the heap there. 74 * - We don't need to see/update the fields of new objects either, because they 75 * are either still null, or anything that's been stored into them has been 76 * evacuated+enqueued before (and will thus be treated later). 77 * 78 * We achieve this by setting TAMS for each region, and everything allocated 79 * beyond TAMS will be 'implicitely marked'. 80 * 81 * Gotchas: 82 * - While we want new objects to be implicitely marked, we don't want to count 83 * them alive. Otherwise the next cycle wouldn't pick them up and consider 84 * them for cset. This means that we need to protect such regions from 85 * getting accidentally thrashed at the end of traversal cycle. This is why I 86 * keep track of alloc-regions and check is_alloc_region() in the trashing 87 * code. 88 * - We *need* to traverse through evacuated objects. Those objects are 89 * pre-existing, and any references in them point to interesting objects that 90 * we need to see. 
We also want to count them as live, because we just 91 * determined that they are alive :-) I achieve this by upping TAMS 92 * concurrently for every gclab/gc-shared alloc before publishing the 93 * evacuated object. This way, the GC threads will not consider such objects 94 * implictely marked, and traverse through them as normal. 95 */ 96 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure { 97 private: 98 ShenandoahObjToScanQueue* _queue; 99 ShenandoahTraversalGC* _traversal_gc; 100 ShenandoahHeap* const _heap; 101 102 public: 103 ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) : 104 _queue(q), 105 _heap(ShenandoahHeap::heap()) 106 { } 107 108 void do_buffer(void** buffer, size_t size) { 109 for (size_t i = 0; i < size; ++i) { 110 oop* p = (oop*) &buffer[i]; 111 oop obj = RawAccess<>::oop_load(p); 112 shenandoah_assert_not_forwarded(p, obj); 113 if (_heap->marking_context()->mark(obj)) { 114 _queue->push(ShenandoahMarkTask(obj)); 115 } 116 } 117 } 118 }; 119 120 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure { 121 private: 122 ShenandoahTraversalSATBBufferClosure* _satb_cl; 123 124 public: 125 ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) : 126 _satb_cl(satb_cl) {} 127 128 void do_thread(Thread* thread) { 129 if (thread->is_Java_thread()) { 130 JavaThread* jt = (JavaThread*)thread; 131 ShenandoahThreadLocalData::satb_mark_queue(jt).apply_closure_and_empty(_satb_cl); 132 } else if (thread->is_VM_thread()) { 133 ShenandoahBarrierSet::satb_mark_queue_set().shared_satb_queue()->apply_closure_and_empty(_satb_cl); 134 } 135 } 136 }; 137 138 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal 139 // and remark them later during final-traversal. 
140 class ShenandoahMarkCLDClosure : public CLDClosure { 141 private: 142 OopClosure* _cl; 143 public: 144 ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {} 145 void do_cld(ClassLoaderData* cld) { 146 cld->oops_do(_cl, true, true); 147 } 148 }; 149 150 // Like CLDToOopClosure, but only process modified CLDs 151 class ShenandoahRemarkCLDClosure : public CLDClosure { 152 private: 153 OopClosure* _cl; 154 public: 155 ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {} 156 void do_cld(ClassLoaderData* cld) { 157 if (cld->has_modified_oops()) { 158 cld->oops_do(_cl, true, true); 159 } 160 } 161 }; 162 163 class ShenandoahInitTraversalCollectionTask : public AbstractGangTask { 164 private: 165 ShenandoahRootProcessor* _rp; 166 ShenandoahHeap* _heap; 167 ShenandoahCsetCodeRootsIterator* _cset_coderoots; 168 public: 169 ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) : 170 AbstractGangTask("Shenandoah Init Traversal Collection"), 171 _rp(rp), 172 _heap(ShenandoahHeap::heap()), 173 _cset_coderoots(cset_coderoots) {} 174 175 void work(uint worker_id) { 176 ShenandoahParallelWorkerSession worker_session(worker_id); 177 178 ShenandoahEvacOOMScope oom_evac_scope; 179 ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues(); 180 ShenandoahObjToScanQueue* q = queues->queue(worker_id); 181 182 bool process_refs = _heap->process_references(); 183 bool unload_classes = _heap->unload_classes(); 184 ReferenceProcessor* rp = NULL; 185 if (process_refs) { 186 rp = _heap->ref_processor(); 187 } 188 189 // Step 1: Process ordinary GC roots. 190 { 191 ShenandoahTraversalClosure roots_cl(q, rp); 192 ShenandoahMarkCLDClosure cld_cl(&roots_cl); 193 MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations); 194 if (unload_classes) { 195 _rp->process_strong_roots(&roots_cl, &cld_cl, NULL, NULL, worker_id); 196 // Need to pre-evac code roots here. 
Otherwise we might see from-space constants. 197 ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times(); 198 ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id); 199 _cset_coderoots->possibly_parallel_blobs_do(&code_cl); 200 } else { 201 _rp->process_all_roots(&roots_cl, &cld_cl, &code_cl, NULL, worker_id); 202 } 203 } 204 } 205 }; 206 207 class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask { 208 private: 209 ShenandoahTaskTerminator* _terminator; 210 ShenandoahHeap* _heap; 211 public: 212 ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) : 213 AbstractGangTask("Shenandoah Concurrent Traversal Collection"), 214 _terminator(terminator), 215 _heap(ShenandoahHeap::heap()) {} 216 217 void work(uint worker_id) { 218 ShenandoahConcurrentWorkerSession worker_session(worker_id); 219 ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers); 220 ShenandoahEvacOOMScope oom_evac_scope; 221 ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc(); 222 223 // Drain all outstanding work in queues. 
224 traversal_gc->main_loop(worker_id, _terminator, true); 225 } 226 }; 227 228 class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask { 229 private: 230 ShenandoahRootProcessor* _rp; 231 ShenandoahTaskTerminator* _terminator; 232 ShenandoahHeap* _heap; 233 public: 234 ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) : 235 AbstractGangTask("Shenandoah Final Traversal Collection"), 236 _rp(rp), 237 _terminator(terminator), 238 _heap(ShenandoahHeap::heap()) {} 239 240 void work(uint worker_id) { 241 ShenandoahParallelWorkerSession worker_session(worker_id); 242 243 ShenandoahEvacOOMScope oom_evac_scope; 244 ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc(); 245 246 ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues(); 247 ShenandoahObjToScanQueue* q = queues->queue(worker_id); 248 249 bool process_refs = _heap->process_references(); 250 bool unload_classes = _heap->unload_classes(); 251 ReferenceProcessor* rp = NULL; 252 if (process_refs) { 253 rp = _heap->ref_processor(); 254 } 255 256 // Step 0: Drain outstanding SATB queues. 257 // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below. 258 ShenandoahTraversalSATBBufferClosure satb_cl(q); 259 { 260 // Process remaining finished SATB buffers. 261 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set(); 262 while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl)); 263 // Process remaining threads SATB buffers below. 264 } 265 266 // Step 1: Process GC roots. 267 // For oops in code roots, they are marked, evacuated, enqueued for further traversal, 268 // and the references to the oops are updated during init pause. New nmethods are handled 269 // in similar way during nmethod-register process. Therefore, we don't need to rescan code 270 // roots here. 
271 if (!_heap->is_degenerated_gc_in_progress()) { 272 ShenandoahTraversalClosure roots_cl(q, rp); 273 CLDToOopClosure cld_cl(&roots_cl); 274 ShenandoahTraversalSATBThreadsClosure tc(&satb_cl); 275 if (unload_classes) { 276 ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl); 277 _rp->process_strong_roots(&roots_cl, &remark_cld_cl, NULL, &tc, worker_id); 278 } else { 279 _rp->process_all_roots(&roots_cl, &cld_cl, NULL, &tc, worker_id); 280 } 281 } else { 282 ShenandoahTraversalDegenClosure roots_cl(q, rp); 283 CLDToOopClosure cld_cl(&roots_cl); 284 ShenandoahTraversalSATBThreadsClosure tc(&satb_cl); 285 if (unload_classes) { 286 ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl); 287 _rp->process_strong_roots(&roots_cl, &remark_cld_cl, NULL, &tc, worker_id); 288 } else { 289 _rp->process_all_roots(&roots_cl, &cld_cl, NULL, &tc, worker_id); 290 } 291 } 292 293 { 294 ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times(); 295 ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id); 296 297 // Step 3: Finally drain all outstanding work in queues. 
298 traversal_gc->main_loop(worker_id, _terminator, false); 299 } 300 301 } 302 }; 303 304 ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) : 305 _heap(heap), 306 _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())), 307 _traversal_set(ShenandoahHeapRegionSet()) { 308 309 uint num_queues = heap->max_workers(); 310 for (uint i = 0; i < num_queues; ++i) { 311 ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue(); 312 task_queue->initialize(); 313 _task_queues->register_queue(i, task_queue); 314 } 315 } 316 317 ShenandoahTraversalGC::~ShenandoahTraversalGC() { 318 } 319 320 void ShenandoahTraversalGC::prepare_regions() { 321 size_t num_regions = _heap->num_regions(); 322 ShenandoahMarkingContext* const ctx = _heap->marking_context(); 323 for (size_t i = 0; i < num_regions; i++) { 324 ShenandoahHeapRegion* region = _heap->get_region(i); 325 if (_heap->is_bitmap_slice_committed(region)) { 326 if (_traversal_set.is_in(i)) { 327 ctx->capture_top_at_mark_start(region); 328 region->clear_live_data(); 329 assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared"); 330 } else { 331 // Everything outside the traversal set is always considered live. 332 ctx->reset_top_at_mark_start(region); 333 } 334 } else { 335 // FreeSet may contain uncommitted empty regions, once they are recommitted, 336 // their TAMS may have old values, so reset them here. 
337 ctx->reset_top_at_mark_start(region); 338 } 339 } 340 } 341 342 void ShenandoahTraversalGC::prepare() { 343 _heap->collection_set()->clear(); 344 assert(_heap->collection_set()->count() == 0, "collection set not clear"); 345 346 if (UseTLAB) { 347 ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_accumulate_stats); 348 _heap->accumulate_statistics_tlabs(); 349 } 350 351 { 352 ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable); 353 _heap->make_parsable(true); 354 } 355 356 if (UseTLAB) { 357 ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs); 358 _heap->resize_tlabs(); 359 } 360 361 assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap"); 362 assert(!_heap->marking_context()->is_complete(), "should not be complete"); 363 364 ShenandoahFreeSet* free_set = _heap->free_set(); 365 ShenandoahCollectionSet* collection_set = _heap->collection_set(); 366 367 // Find collection set 368 _heap->heuristics()->choose_collection_set(collection_set); 369 prepare_regions(); 370 371 // Rebuild free set 372 free_set->rebuild(); 373 374 log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions", 375 collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count()); 376 } 377 378 void ShenandoahTraversalGC::init_traversal_collection() { 379 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC"); 380 381 if (ShenandoahVerify) { 382 _heap->verifier()->verify_before_traversal(); 383 } 384 385 if (VerifyBeforeGC) { 386 Universe::verify(); 387 } 388 389 { 390 ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare); 391 ShenandoahHeapLocker lock(_heap->lock()); 392 prepare(); 393 } 394 395 _heap->set_concurrent_traversal_in_progress(true); 396 397 bool process_refs = _heap->process_references(); 398 if (process_refs) { 399 ReferenceProcessor* rp = _heap->ref_processor(); 400 
rp->enable_discovery(true /*verify_no_refs*/); 401 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs()); 402 } 403 404 { 405 ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work); 406 assert(_task_queues->is_empty(), "queues must be empty before traversal GC"); 407 TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats()); 408 409 #if defined(COMPILER2) || INCLUDE_JVMCI 410 DerivedPointerTable::clear(); 411 #endif 412 413 { 414 uint nworkers = _heap->workers()->active_workers(); 415 task_queues()->reserve(nworkers); 416 ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work); 417 418 ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator(); 419 420 ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots); 421 _heap->workers()->run_task(&traversal_task); 422 } 423 424 #if defined(COMPILER2) || INCLUDE_JVMCI 425 DerivedPointerTable::update_pointers(); 426 #endif 427 } 428 429 if (ShenandoahPacing) { 430 _heap->pacer()->setup_for_traversal(); 431 } 432 } 433 434 void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) { 435 ShenandoahObjToScanQueue* q = task_queues()->queue(w); 436 437 // Initialize live data. 
438 jushort* ld = _heap->get_liveness_cache(w); 439 440 ReferenceProcessor* rp = NULL; 441 if (_heap->process_references()) { 442 rp = _heap->ref_processor(); 443 } 444 { 445 if (!_heap->is_degenerated_gc_in_progress()) { 446 if (_heap->unload_classes()) { 447 if (ShenandoahStringDedup::is_enabled()) { 448 ShenandoahTraversalMetadataDedupClosure cl(q, rp); 449 main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield); 450 } else { 451 ShenandoahTraversalMetadataClosure cl(q, rp); 452 main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield); 453 } 454 } else { 455 if (ShenandoahStringDedup::is_enabled()) { 456 ShenandoahTraversalDedupClosure cl(q, rp); 457 main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield); 458 } else { 459 ShenandoahTraversalClosure cl(q, rp); 460 main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield); 461 } 462 } 463 } else { 464 if (_heap->unload_classes()) { 465 if (ShenandoahStringDedup::is_enabled()) { 466 ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp); 467 main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield); 468 } else { 469 ShenandoahTraversalMetadataDegenClosure cl(q, rp); 470 main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield); 471 } 472 } else { 473 if (ShenandoahStringDedup::is_enabled()) { 474 ShenandoahTraversalDedupDegenClosure cl(q, rp); 475 main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield); 476 } else { 477 ShenandoahTraversalDegenClosure cl(q, rp); 478 main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield); 479 } 480 } 481 } 482 } 483 484 _heap->flush_liveness_cache(w); 485 } 486 487 template <class T> 488 void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) { 489 ShenandoahObjToScanQueueSet* queues = task_queues(); 490 ShenandoahObjToScanQueue* q 
= queues->queue(worker_id); 491 ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark(); 492 493 uintx stride = ShenandoahMarkLoopStride; 494 495 ShenandoahMarkTask task; 496 497 // Process outstanding queues, if any. 498 q = queues->claim_next(); 499 while (q != NULL) { 500 if (_heap->check_cancelled_gc_and_yield(sts_yield)) { 501 return; 502 } 503 504 for (uint i = 0; i < stride; i++) { 505 if (q->pop(task)) { 506 conc_mark->do_task<T>(q, cl, live_data, &task); 507 } else { 508 assert(q->is_empty(), "Must be empty"); 509 q = queues->claim_next(); 510 break; 511 } 512 } 513 } 514 515 if (check_and_handle_cancelled_gc(terminator, sts_yield)) return; 516 517 // Normal loop. 518 q = queues->queue(worker_id); 519 520 ShenandoahTraversalSATBBufferClosure drain_satb(q); 521 SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set(); 522 523 int seed = 17; 524 525 while (true) { 526 if (check_and_handle_cancelled_gc(terminator, sts_yield)) return; 527 528 while (satb_mq_set.completed_buffers_num() > 0) { 529 satb_mq_set.apply_closure_to_completed_buffer(&drain_satb); 530 } 531 532 uint work = 0; 533 for (uint i = 0; i < stride; i++) { 534 if (q->pop(task) || 535 queues->steal(worker_id, &seed, task)) { 536 conc_mark->do_task<T>(q, cl, live_data, &task); 537 work++; 538 } else { 539 break; 540 } 541 } 542 543 if (work == 0) { 544 // No more work, try to terminate 545 ShenandoahEvacOOMScopeLeaver oom_scope_leaver; 546 ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers); 547 ShenandoahTerminationTimingsTracker term_tracker(worker_id); 548 ShenandoahTerminatorTerminator tt(_heap); 549 550 if (terminator->offer_termination(&tt)) return; 551 } 552 } 553 } 554 555 bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) { 556 if (_heap->cancelled_gc()) { 557 return true; 558 } 559 return false; 560 } 561 562 void 
ShenandoahTraversalGC::concurrent_traversal_collection() { 563 ClassLoaderDataGraph::clear_claimed_marks(); 564 565 ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal); 566 if (!_heap->cancelled_gc()) { 567 uint nworkers = _heap->workers()->active_workers(); 568 task_queues()->reserve(nworkers); 569 ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination); 570 571 ShenandoahTaskTerminator terminator(nworkers, task_queues()); 572 ShenandoahConcurrentTraversalCollectionTask task(&terminator); 573 _heap->workers()->run_task(&task); 574 } 575 576 if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) { 577 preclean_weak_refs(); 578 } 579 } 580 581 void ShenandoahTraversalGC::final_traversal_collection() { 582 _heap->make_parsable(true); 583 584 if (!_heap->cancelled_gc()) { 585 #if defined(COMPILER2) || INCLUDE_JVMCI 586 DerivedPointerTable::clear(); 587 #endif 588 ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work); 589 uint nworkers = _heap->workers()->active_workers(); 590 task_queues()->reserve(nworkers); 591 592 // Finish traversal 593 ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work); 594 ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination); 595 596 ShenandoahTaskTerminator terminator(nworkers, task_queues()); 597 ShenandoahFinalTraversalCollectionTask task(&rp, &terminator); 598 _heap->workers()->run_task(&task); 599 #if defined(COMPILER2) || INCLUDE_JVMCI 600 DerivedPointerTable::update_pointers(); 601 #endif 602 } 603 604 if (!_heap->cancelled_gc() && _heap->process_references()) { 605 weak_refs_work(); 606 } 607 608 if (!_heap->cancelled_gc()) { 609 if (_heap->unload_classes()) { 610 _heap->unload_classes_and_cleanup_tables(false); 611 } 612 613 fixup_roots(); 614 } 615 616 if (!_heap->cancelled_gc()) { 617 assert(_task_queues->is_empty(), "queues must be empty after traversal 
GC"); 618 TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats()); 619 TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats()); 620 621 // No more marking expected 622 _heap->mark_complete_marking_context(); 623 624 // Resize metaspace 625 MetaspaceGC::compute_new_size(); 626 627 // Still good? We can now trash the cset, and make final verification 628 { 629 ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup); 630 ShenandoahHeapLocker lock(_heap->lock()); 631 632 // Trash everything 633 // Clear immediate garbage regions. 634 size_t num_regions = _heap->num_regions(); 635 636 ShenandoahHeapRegionSet* traversal_regions = traversal_set(); 637 ShenandoahFreeSet* free_regions = _heap->free_set(); 638 ShenandoahMarkingContext* const ctx = _heap->marking_context(); 639 free_regions->clear(); 640 for (size_t i = 0; i < num_regions; i++) { 641 ShenandoahHeapRegion* r = _heap->get_region(i); 642 bool not_allocated = ctx->top_at_mark_start(r) == r->top(); 643 644 bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated; 645 if (r->is_humongous_start() && candidate) { 646 // Trash humongous. 647 HeapWord* humongous_obj = r->bottom() + ShenandoahBrooksPointer::word_size(); 648 assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked"); 649 r->make_trash_immediate(); 650 while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) { 651 i++; 652 r = _heap->get_region(i); 653 assert(r->is_humongous_continuation(), "must be humongous continuation"); 654 r->make_trash_immediate(); 655 } 656 } else if (!r->is_empty() && candidate) { 657 // Trash regular. 
658 assert(!r->is_humongous(), "handled above"); 659 assert(!r->is_trash(), "must not already be trashed"); 660 r->make_trash_immediate(); 661 } 662 } 663 _heap->collection_set()->clear(); 664 _heap->free_set()->rebuild(); 665 reset(); 666 } 667 668 assert(_task_queues->is_empty(), "queues must be empty after traversal GC"); 669 _heap->set_concurrent_traversal_in_progress(false); 670 assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here"); 671 672 if (ShenandoahVerify) { 673 _heap->verifier()->verify_after_traversal(); 674 } 675 676 if (VerifyAfterGC) { 677 Universe::verify(); 678 } 679 } 680 } 681 682 class ShenandoahTraversalFixRootsClosure : public OopClosure { 683 private: 684 template <class T> 685 inline void do_oop_work(T* p) { 686 T o = RawAccess<>::oop_load(p); 687 if (!CompressedOops::is_null(o)) { 688 oop obj = CompressedOops::decode_not_null(o); 689 oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); 690 if (!oopDesc::unsafe_equals(obj, forw)) { 691 RawAccess<IS_NOT_NULL>::oop_store(p, forw); 692 } 693 } 694 } 695 696 public: 697 inline void do_oop(oop* p) { do_oop_work(p); } 698 inline void do_oop(narrowOop* p) { do_oop_work(p); } 699 }; 700 701 class ShenandoahTraversalFixRootsTask : public AbstractGangTask { 702 private: 703 ShenandoahRootProcessor* _rp; 704 705 public: 706 ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) : 707 AbstractGangTask("Shenandoah traversal fix roots"), 708 _rp(rp) { 709 assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be"); 710 } 711 712 void work(uint worker_id) { 713 ShenandoahParallelWorkerSession worker_session(worker_id); 714 ShenandoahTraversalFixRootsClosure cl; 715 MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations); 716 CLDToOopClosure cldCl(&cl); 717 _rp->update_all_roots<ShenandoahForwardedIsAliveClosure>(&cl, &cldCl, &blobsCl, NULL, worker_id); 718 } 719 }; 720 721 void ShenandoahTraversalGC::fixup_roots() { 722 #if 
defined(COMPILER2) || INCLUDE_JVMCI 723 DerivedPointerTable::clear(); 724 #endif 725 ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots); 726 ShenandoahTraversalFixRootsTask update_roots_task(&rp); 727 _heap->workers()->run_task(&update_roots_task); 728 #if defined(COMPILER2) || INCLUDE_JVMCI 729 DerivedPointerTable::update_pointers(); 730 #endif 731 } 732 733 void ShenandoahTraversalGC::reset() { 734 _task_queues->clear(); 735 } 736 737 ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() { 738 return _task_queues; 739 } 740 741 class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure { 742 private: 743 ShenandoahHeap* const _heap; 744 public: 745 ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {}; 746 virtual bool should_return() { return _heap->cancelled_gc(); } 747 }; 748 749 class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure { 750 public: 751 void do_void() { 752 ShenandoahHeap* sh = ShenandoahHeap::heap(); 753 ShenandoahTraversalGC* traversal_gc = sh->traversal_gc(); 754 assert(sh->process_references(), "why else would we be here?"); 755 ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues()); 756 shenandoah_assert_rp_isalive_installed(); 757 traversal_gc->main_loop((uint) 0, &terminator, true); 758 } 759 }; 760 761 class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure { 762 private: 763 ShenandoahObjToScanQueue* _queue; 764 Thread* _thread; 765 ShenandoahTraversalGC* _traversal_gc; 766 ShenandoahMarkingContext* const _mark_context; 767 768 template <class T> 769 inline void do_oop_work(T* p) { 770 _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context); 771 } 772 773 public: 774 ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) : 775 _queue(q), _thread(Thread::current()), 776 
_traversal_gc(ShenandoahHeap::heap()->traversal_gc()), 777 _mark_context(ShenandoahHeap::heap()->marking_context()) {} 778 779 void do_oop(narrowOop* p) { do_oop_work(p); } 780 void do_oop(oop* p) { do_oop_work(p); } 781 }; 782 783 class ShenandoahTraversalWeakUpdateClosure : public OopClosure { 784 private: 785 template <class T> 786 inline void do_oop_work(T* p) { 787 // Cannot call maybe_update_with_forwarded, because on traversal-degen 788 // path the collection set is already dropped. Instead, do the unguarded store. 789 // TODO: This can be fixed after degen-traversal stops dropping cset. 790 T o = RawAccess<>::oop_load(p); 791 if (!CompressedOops::is_null(o)) { 792 oop obj = CompressedOops::decode_not_null(o); 793 obj = ShenandoahBarrierSet::resolve_forwarded_not_null(obj); 794 shenandoah_assert_marked(p, obj); 795 RawAccess<IS_NOT_NULL>::oop_store(p, obj); 796 } 797 } 798 799 public: 800 ShenandoahTraversalWeakUpdateClosure() {} 801 802 void do_oop(narrowOop* p) { do_oop_work(p); } 803 void do_oop(oop* p) { do_oop_work(p); } 804 }; 805 806 class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure { 807 private: 808 ShenandoahObjToScanQueue* _queue; 809 Thread* _thread; 810 ShenandoahTraversalGC* _traversal_gc; 811 ShenandoahMarkingContext* const _mark_context; 812 813 template <class T> 814 inline void do_oop_work(T* p) { 815 _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context); 816 } 817 818 public: 819 ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) : 820 _queue(q), _thread(Thread::current()), 821 _traversal_gc(ShenandoahHeap::heap()->traversal_gc()), 822 _mark_context(ShenandoahHeap::heap()->marking_context()) {} 823 824 void do_oop(narrowOop* p) { do_oop_work(p); } 825 void do_oop(oop* p) { do_oop_work(p); } 826 }; 827 828 class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure { 829 private: 830 ShenandoahObjToScanQueue* _queue; 
831 Thread* _thread; 832 ShenandoahTraversalGC* _traversal_gc; 833 ShenandoahMarkingContext* const _mark_context; 834 835 template <class T> 836 inline void do_oop_work(T* p) { 837 ShenandoahEvacOOMScope evac_scope; 838 _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context); 839 } 840 841 public: 842 ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) : 843 _queue(q), _thread(Thread::current()), 844 _traversal_gc(ShenandoahHeap::heap()->traversal_gc()), 845 _mark_context(ShenandoahHeap::heap()->marking_context()) {} 846 847 void do_oop(narrowOop* p) { do_oop_work(p); } 848 void do_oop(oop* p) { do_oop_work(p); } 849 }; 850 851 class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure { 852 private: 853 ShenandoahObjToScanQueue* _queue; 854 Thread* _thread; 855 ShenandoahTraversalGC* _traversal_gc; 856 ShenandoahMarkingContext* const _mark_context; 857 858 template <class T> 859 inline void do_oop_work(T* p) { 860 ShenandoahEvacOOMScope evac_scope; 861 _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context); 862 } 863 864 public: 865 ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) : 866 _queue(q), _thread(Thread::current()), 867 _traversal_gc(ShenandoahHeap::heap()->traversal_gc()), 868 _mark_context(ShenandoahHeap::heap()->marking_context()) {} 869 870 void do_oop(narrowOop* p) { do_oop_work(p); } 871 void do_oop(oop* p) { do_oop_work(p); } 872 }; 873 874 class ShenandoahTraversalPrecleanTask : public AbstractGangTask { 875 private: 876 ReferenceProcessor* _rp; 877 878 public: 879 ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) : 880 AbstractGangTask("Precleaning task"), 881 _rp(rp) {} 882 883 void work(uint worker_id) { 884 assert(worker_id == 0, "The code below is single-threaded, only one worker is expected"); 885 ShenandoahParallelWorkerSession 
worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahEvacOOMScope oom_evac_scope;

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);

    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahTraversalCancelledGCYieldClosure yield;
    ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
    ResourceMark rm;
    // Last argument (phase times) is intentionally NULL: precleaning is not
    // reported through ReferenceProcessorPhaseTimes here.
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};

void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning below is single-threaded; disable MT discovery for its duration.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues may be legitimately non-empty if the GC was cancelled mid-preclean.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}

// Weak Reference Closures

// Drains the traversal marking work via main_loop for the given worker.
// Optionally resets the terminator afterwards, so the reference processor
// can invoke this complete_gc closure several times.
class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    shenandoah_assert_rp_isalive_installed();

    traversal_gc->main_loop(_worker_id, _terminator, false);

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

// Single-threaded counterpart of the drain closure, used on the serial
// reference-processing path (see weak_refs_work_doit); additionally enters
// an evacuation OOM scope around the drain.
class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    shenandoah_assert_rp_isalive_installed();

    // The serial RP path has no enclosing evacuation OOM scope; enter one here.
    ShenandoahEvacOOMScope evac_scope;
    traversal_gc->main_loop(_worker_id, _terminator, false);

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

// Entry point for weak reference processing during traversal GC: times the
// phase and delegates the actual work to weak_refs_work_doit().
void ShenandoahTraversalGC::weak_refs_work() {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;

  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we will miss marking JNI Weak refs then, see implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit();

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");
}

// Gang task proxy that runs one reference-processor ProcessTask per worker,
// selecting the concurrent or degenerated keep-alive closure as appropriate.
class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                                      ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    // Keep-alive closures may evacuate; cover the whole task with an OOM scope.
    ShenandoahEvacOOMScope oom_evac_scope;
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);

    ShenandoahForwardedIsAliveClosure is_alive;
    if (!heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

// AbstractRefProcTaskExecutor that dispatches reference-processing tasks to
// the heap's worker gang, with the traversal task queues pushed in scope.
class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}

  // Executes a task using worker threads.
void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    // Push the traversal task queues to the workers for the duration of this
    // task; do_check is off because the worker count is dictated by the
    // reference processor (ergo_workers).
    ShenandoahPushWorkerQueuesScope scope(_workers,
                                          traversal_gc->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    traversal_gc->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
    ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

// Performs the actual weak reference processing: sets up is-alive/keep-alive
// closures, runs the reference processor (serial path shares queue 0), then
// updates leftover weak oop roots via WeakProcessor.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ReferenceProcessor* rp = _heap->ref_processor();

  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
  ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);

  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
  if (!_heap->is_degenerated_gc_in_progress()) {
    ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  } else {
    ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  }

  {
    ShenandoahGCPhase phase(phase_process);
    ShenandoahTerminationTracker termination(ShenandoahPhaseTimings::weakrefs_termination);

    // Process leftover weak oops
    ShenandoahTraversalWeakUpdateClosure cl;
    WeakProcessor::weak_oops_do(&is_alive, &cl);

    pt.print_all_references();

    // Queues may be non-empty only if the GC was cancelled.
    assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
  }
}