1 /* 2 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
21 * 22 */ 23 24 #include "precompiled.hpp" 25 26 #include "classfile/classLoaderData.hpp" 27 #include "classfile/classLoaderDataGraph.hpp" 28 #include "gc/shared/referenceProcessor.hpp" 29 #include "gc/shared/referenceProcessorPhaseTimes.hpp" 30 #include "gc/shared/workgroup.hpp" 31 #include "gc/shared/weakProcessor.inline.hpp" 32 #include "gc/shenandoah/shenandoahBarrierSet.hpp" 33 #include "gc/shenandoah/shenandoahClosures.inline.hpp" 34 #include "gc/shenandoah/shenandoahCodeRoots.hpp" 35 #include "gc/shenandoah/shenandoahCollectionSet.hpp" 36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 37 #include "gc/shenandoah/shenandoahFreeSet.hpp" 38 #include "gc/shenandoah/shenandoahForwarding.hpp" 39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp" 40 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" 42 #include "gc/shenandoah/shenandoahHeuristics.hpp" 43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" 44 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" 45 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" 46 #include "gc/shenandoah/shenandoahStringDedup.hpp" 47 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" 48 #include "gc/shenandoah/shenandoahTimingTracker.hpp" 49 #include "gc/shenandoah/shenandoahTraversalGC.hpp" 50 #include "gc/shenandoah/shenandoahUtils.hpp" 51 #include "gc/shenandoah/shenandoahVerifier.hpp" 52 53 #include "memory/iterator.hpp" 54 #include "memory/metaspace.hpp" 55 #include "memory/resourceArea.hpp" 56 #include "memory/universe.hpp" 57 58 /** 59 * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm. 60 * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm 61 * is incremental-update-based. 
 *
 * NOTE on interaction with TAMS: we want to avoid traversing new objects for
 * several reasons:
 * - We will not reclaim them in this cycle anyway, because they are not in the
 *   cset
 * - It makes up for the bulk of work during final-pause
 * - It also shortens the concurrent cycle because we don't need to
 *   pointlessly traverse through newly allocated objects.
 * - As a nice side-effect, it solves the I-U termination problem (mutators
 *   cannot outrun the GC by allocating like crazy)
 * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
 *   achieves the same, but without extra barriers. I think the effect of
 *   shortened final-pause (mentioned above) is the main advantage of MWF. In
 *   particular, we will not see the head of a completely new long linked list
 *   in final-pause and end up traversing huge chunks of the heap there.
 * - We don't need to see/update the fields of new objects either, because they
 *   are either still null, or anything that's been stored into them has been
 *   evacuated+enqueued before (and will thus be treated later).
 *
 * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
 *
 * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
 *   them alive. Otherwise the next cycle wouldn't pick them up and consider
 *   them for cset. This means that we need to protect such regions from
 *   getting accidentally thrashed at the end of traversal cycle. This is why I
 *   keep track of alloc-regions and check is_alloc_region() in the trashing
 *   code.
 * - We *need* to traverse through evacuated objects. Those objects are
 *   pre-existing, and any references in them point to interesting objects that
 *   we need to see.
We also want to count them as live, because we just 95 * determined that they are alive :-) I achieve this by upping TAMS 96 * concurrently for every gclab/gc-shared alloc before publishing the 97 * evacuated object. This way, the GC threads will not consider such objects 98 * implictely marked, and traverse through them as normal. 99 */ 100 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure { 101 private: 102 ShenandoahObjToScanQueue* _queue; 103 ShenandoahTraversalGC* _traversal_gc; 104 ShenandoahHeap* const _heap; 105 106 public: 107 ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) : 108 _queue(q), 109 _heap(ShenandoahHeap::heap()) 110 { } 111 112 void do_buffer(void** buffer, size_t size) { 113 for (size_t i = 0; i < size; ++i) { 114 oop* p = (oop*) &buffer[i]; 115 oop obj = RawAccess<>::oop_load(p); 116 shenandoah_assert_not_forwarded(p, obj); 117 if (_heap->marking_context()->mark(obj)) { 118 _queue->push(ShenandoahMarkTask(obj)); 119 } 120 } 121 } 122 }; 123 124 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure { 125 private: 126 ShenandoahTraversalSATBBufferClosure* _satb_cl; 127 128 public: 129 ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) : 130 _satb_cl(satb_cl) {} 131 132 void do_thread(Thread* thread) { 133 ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl); 134 } 135 }; 136 137 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal 138 // and remark them later during final-traversal. 
class ShenandoahMarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;
public:
  ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    // Claim the CLD and clear its modified-oops flag, so later mutator stores
    // flag it again for the remark pass (see ShenandoahRemarkCLDClosure).
    cld->oops_do(_cl, true, true);
  }
};

// Like CLDToOopClosure, but only process modified CLDs
class ShenandoahRemarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;
public:
  ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    // Only CLDs that saw oop stores since the initial scan need re-scanning.
    if (cld->has_modified_oops()) {
      cld->oops_do(_cl, true, true);
    }
  }
};

// STW gang task for the init-traversal pause: scans GC roots and seeds the
// traversal queues with the initial wavefront.
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahHeap* _heap;
  ShenandoahCsetCodeRootsIterator* _cset_coderoots;
public:
  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()),
    _cset_coderoots(cset_coderoots) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      if (unload_classes) {
        _rp->process_strong_roots(&roots_cl, &cld_cl, NULL, NULL, worker_id);
        // Need to pre-evac code roots here. Otherwise we might see from-space constants.
        ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
        ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        _cset_coderoots->possibly_parallel_blobs_do(&code_cl);
      } else {
        _rp->process_all_roots(&roots_cl, &cld_cl, &code_cl, NULL, worker_id);
      }
      if (ShenandoahStringDedup::is_enabled()) {
        AlwaysTrueClosure is_alive;
        ShenandoahStringDedup::parallel_oops_do(&is_alive, &roots_cl, worker_id);
      }
    }
  }
};

// Concurrent-phase gang task: each worker drains the traversal queues (and
// SATB buffers) until termination or GC cancellation.
class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    // Drain all outstanding work in queues.
    traversal_gc->main_loop(worker_id, _terminator, true);
  }
};

// STW gang task for the final-traversal pause: drains leftover SATB buffers,
// re-scans roots, then finishes all remaining queued work.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, &remark_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    } else {
      // Degenerated GC path uses the *Degen closure variants.
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->process_strong_roots(&roots_cl, &remark_cld_cl, NULL, &tc, worker_id);
      } else {
        _rp->process_all_roots(&roots_cl, &cld_cl, NULL, &tc, worker_id);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 3: Finally drain all outstanding work in queues.
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};

ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
  _heap(heap),
  _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
  _traversal_set(ShenandoahHeapRegionSet()) {

  // One queue per potential worker; workers claim queues by id.
  uint num_queues = heap->max_workers();
  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}

// Sets up TAMS for all regions: traversal-set regions capture TAMS at current
// top (allocations beyond it are implicitly marked, see banner comment),
// all other regions get TAMS reset so they are treated as wholly live.
void ShenandoahTraversalGC::prepare_regions() {
  size_t num_regions = _heap->num_regions();
  ShenandoahMarkingContext* const ctx = _heap->marking_context();
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = _heap->get_region(i);
    if (_heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        ctx->reset_top_at_mark_start(region);
      }
    } else {
      // FreeSet may contain uncommitted empty regions, once they are recommitted,
      // their TAMS may have old values, so reset them here.
ctx->reset_top_at_mark_start(region);
    }
  }
}

// STW preparation (called under the heap lock): chooses the collection set,
// sets up region TAMS, and rebuilds the free set.
void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
}

// Init-traversal safepoint: verify, prepare under the heap lock, enable
// reference discovery, and run the STW root scan that seeds the queues.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator();

      ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}

// Per-worker traversal driver: picks the oop-closure specialization from the
// degenerated / class-unloading / string-dedup settings, then runs the
// templated work loop with it. Liveness is accumulated in a per-worker cache
// and flushed at the end.
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

// Core work loop: first drain any outstanding (claimable) queues, then
// alternate between draining completed SATB buffers, pop/steal of queue
// tasks, and termination offers, until done or the GC is cancelled.
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q = queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Cancellation/termination checks are amortized over 'stride' tasks.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}

// Returns true iff the GC has been cancelled. The terminator/sts_yield
// parameters are currently unused in this implementation.
bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
  if (_heap->cancelled_gc()) {
    return true;
  }
  return false;
}

// Concurrent traversal phase: run the concurrent traversal task on the worker
// gang, then optionally preclean weak references.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  ShenandoahGCPhase
phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}

// Final-traversal safepoint: finish traversal work, process weak references,
// fix up roots, unload classes, and trash fully-dead traversal regions.
// Each stage is skipped once the GC has been cancelled.
void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(_heap, nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    fixup_roots();
    if (_heap->unload_classes()) {
      _heap->unload_classes_and_cleanup_tables(false);
    }
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // TAMS == top means nothing was allocated in this region during the
        // cycle (see the banner comment: alloc-regions must not be trashed).
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + ShenandoahForwarding::word_size();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Also trash all continuation regions of the humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

// Rewrites a root slot to point at the forwardee when the referenced object
// has been evacuated.
class ShenandoahTraversalFixRootsClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      if (!oopDesc::equals_raw(obj, forw)) {
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  inline void do_oop(oop* p) { do_oop_work(p); }
  inline void do_oop(narrowOop* p) { do_oop_work(p); }
};

// Gang task that applies the fix-roots closure to all roots.
class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;

public:
  ShenandoahTraversalFixRootsTask(ShenandoahRootProcessor* rp) :
    AbstractGangTask("Shenandoah traversal fix roots"),
    _rp(rp) {
    assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahTraversalFixRootsClosure cl;
    MarkingCodeBlobClosure blobsCl(&cl, CodeBlobToOopClosure::FixRelocations);
    CLDToOopClosure cldCl(&cl, ClassLoaderData::_claim_strong);
    _rp->update_all_roots<ShenandoahForwardedIsAliveClosure>(&cl, &cldCl, &blobsCl, NULL, worker_id);
  }
};

// Updates all root slots to point at forwardees after evacuation.
void ShenandoahTraversalGC::fixup_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootProcessor rp(_heap, _heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}

ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}

// Requests a yield (returns true) as soon as the GC has been cancelled.
class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

// Complete-GC closure for reference precleaning: drains the traversal queues
// single-threaded (as worker 0).
class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
    shenandoah_assert_rp_isalive_installed();
    traversal_gc->main_loop((uint) 0, &terminator, true);
  }
};

// Keep-alive closure for reference processing: processes the referenced oop
// through the traversal machinery (process_oop) so it is kept alive and
// further traversed.
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q),
    _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Degenerated-mode variant of the keep-alive closure (degen template flag).
class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Single-threaded keep-alive variant: enters an evac-OOM scope per oop.
class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahEvacOOMScope evac_scope;
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Single-threaded, degenerated-mode keep-alive variant.
class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public
OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    // Per-oop evac-OOM scope, as in the non-degen single-thread variant.
    ShenandoahEvacOOMScope evac_scope;
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Single-worker task that runs reference precleaning using the traversal
// keep-alive / complete-GC / yield closures above.
class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
    AbstractGangTask("Precleaning task"),
    _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahEvacOOMScope oom_evac_scope;

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);

    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahTraversalCancelledGCYieldClosure yield;
    ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};

// Concurrent weak-reference precleaning; see in-body comments for rationale.
void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
  // end of concurrent mark. This will filter out the references which referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Precleaning below is single-threaded; disable MT discovery for its duration.
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}

// Weak Reference Closures
// Complete-GC closure for STW reference processing: drains the traversal
// queues from within the safepoint, optionally resetting the terminator for
// reuse across reference-processing phases.
class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    shenandoah_assert_rp_isalive_installed();

    traversal_gc->main_loop(_worker_id, _terminator, false);

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;

public:
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
ShenandoahTraversalGC* traversal_gc = sh->traversal_gc(); 961 assert(sh->process_references(), "why else would we be here?"); 962 shenandoah_assert_rp_isalive_installed(); 963 964 ShenandoahEvacOOMScope evac_scope; 965 traversal_gc->main_loop(_worker_id, _terminator, false); 966 967 if (_reset_terminator) { 968 _terminator->reset_for_reuse(); 969 } 970 } 971 }; 972 973 void ShenandoahTraversalGC::weak_refs_work() { 974 assert(_heap->process_references(), "sanity"); 975 976 ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs; 977 978 ShenandoahGCPhase phase(phase_root); 979 980 ReferenceProcessor* rp = _heap->ref_processor(); 981 982 // NOTE: We cannot shortcut on has_discovered_references() here, because 983 // we will miss marking JNI Weak refs then, see implementation in 984 // ReferenceProcessor::process_discovered_references. 985 weak_refs_work_doit(); 986 987 rp->verify_no_references_recorded(); 988 assert(!rp->discovery_enabled(), "Post condition"); 989 990 } 991 992 class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask { 993 private: 994 AbstractRefProcTaskExecutor::ProcessTask& _proc_task; 995 ShenandoahTaskTerminator* _terminator; 996 997 public: 998 ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task, 999 ShenandoahTaskTerminator* t) : 1000 AbstractGangTask("Process reference objects in parallel"), 1001 _proc_task(proc_task), 1002 _terminator(t) { 1003 } 1004 1005 void work(uint worker_id) { 1006 ShenandoahEvacOOMScope oom_evac_scope; 1007 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); 1008 ShenandoahHeap* heap = ShenandoahHeap::heap(); 1009 ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator); 1010 1011 ShenandoahForwardedIsAliveClosure is_alive; 1012 if (!heap->is_degenerated_gc_in_progress()) { 1013 ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); 1014 
_proc_task.work(worker_id, is_alive, keep_alive, complete_gc); 1015 } else { 1016 ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id)); 1017 _proc_task.work(worker_id, is_alive, keep_alive, complete_gc); 1018 } 1019 } 1020 }; 1021 1022 class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor { 1023 private: 1024 WorkGang* _workers; 1025 1026 public: 1027 ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {} 1028 1029 // Executes a task using worker threads. 1030 void execute(ProcessTask& task, uint ergo_workers) { 1031 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint"); 1032 1033 ShenandoahHeap* heap = ShenandoahHeap::heap(); 1034 ShenandoahTraversalGC* traversal_gc = heap->traversal_gc(); 1035 ShenandoahPushWorkerQueuesScope scope(_workers, 1036 traversal_gc->task_queues(), 1037 ergo_workers, 1038 /* do_check = */ false); 1039 uint nworkers = _workers->active_workers(); 1040 traversal_gc->task_queues()->reserve(nworkers); 1041 ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues()); 1042 ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator); 1043 _workers->run_task(&proc_task_proxy); 1044 } 1045 }; 1046 1047 void ShenandoahTraversalGC::weak_refs_work_doit() { 1048 ReferenceProcessor* rp = _heap->ref_processor(); 1049 1050 ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process; 1051 1052 shenandoah_assert_rp_isalive_not_installed(); 1053 ShenandoahForwardedIsAliveClosure is_alive; 1054 ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive); 1055 1056 WorkGang* workers = _heap->workers(); 1057 uint nworkers = workers->active_workers(); 1058 1059 rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs()); 1060 rp->set_active_mt_degree(nworkers); 1061 1062 assert(task_queues()->is_empty(), "Should be empty"); 1063 1064 // complete_gc and 
keep_alive closures instantiated here are only needed for 1065 // single-threaded path in RP. They share the queue 0 for tracking work, which 1066 // simplifies implementation. Since RP may decide to call complete_gc several 1067 // times, we need to be able to reuse the terminator. 1068 uint serial_worker_id = 0; 1069 ShenandoahTaskTerminator terminator(1, task_queues()); 1070 ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true); 1071 ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false); 1072 1073 ShenandoahTraversalRefProcTaskExecutor executor(workers); 1074 1075 ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues()); 1076 if (!_heap->is_degenerated_gc_in_progress()) { 1077 ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id)); 1078 rp->process_discovered_references(&is_alive, &keep_alive, 1079 &complete_gc, &executor, 1080 &pt); 1081 } else { 1082 ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id)); 1083 rp->process_discovered_references(&is_alive, &keep_alive, 1084 &complete_gc, &executor, 1085 &pt); 1086 } 1087 1088 pt.print_all_references(); 1089 assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty"); 1090 }