1 /* 2 * Copyright (c) 2018, 2019, Red Hat, Inc. All rights reserved. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 
21 * 22 */ 23 24 #include "precompiled.hpp" 25 26 #include "classfile/classLoaderData.hpp" 27 #include "classfile/classLoaderDataGraph.hpp" 28 #include "gc/shared/referenceProcessor.hpp" 29 #include "gc/shared/referenceProcessorPhaseTimes.hpp" 30 #include "gc/shared/workgroup.hpp" 31 #include "gc/shared/weakProcessor.inline.hpp" 32 #include "gc/shenandoah/shenandoahBarrierSet.hpp" 33 #include "gc/shenandoah/shenandoahClosures.inline.hpp" 34 #include "gc/shenandoah/shenandoahCodeRoots.hpp" 35 #include "gc/shenandoah/shenandoahCollectionSet.hpp" 36 #include "gc/shenandoah/shenandoahCollectorPolicy.hpp" 37 #include "gc/shenandoah/shenandoahFreeSet.hpp" 38 #include "gc/shenandoah/shenandoahForwarding.hpp" 39 #include "gc/shenandoah/shenandoahPhaseTimings.hpp" 40 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 41 #include "gc/shenandoah/shenandoahHeapRegionSet.inline.hpp" 42 #include "gc/shenandoah/shenandoahHeuristics.hpp" 43 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" 44 #include "gc/shenandoah/shenandoahOopClosures.inline.hpp" 45 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" 46 #include "gc/shenandoah/shenandoahStringDedup.hpp" 47 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" 48 #include "gc/shenandoah/shenandoahTimingTracker.hpp" 49 #include "gc/shenandoah/shenandoahTraversalGC.hpp" 50 #include "gc/shenandoah/shenandoahUtils.hpp" 51 #include "gc/shenandoah/shenandoahVerifier.hpp" 52 53 #include "memory/iterator.hpp" 54 #include "memory/metaspace.hpp" 55 #include "memory/resourceArea.hpp" 56 #include "memory/universe.hpp" 57 58 /** 59 * NOTE: We are using the SATB buffer in thread.hpp and satbMarkQueue.hpp, however, it is not an SATB algorithm. 60 * We're using the buffer as generic oop buffer to enqueue new values in concurrent oop stores, IOW, the algorithm 61 * is incremental-update-based. 
 *
 * NOTE on interaction with TAMS: we want to avoid traversing new objects for
 * several reasons:
 * - We will not reclaim them in this cycle anyway, because they are not in the
 *   cset
 * - It makes up for the bulk of work during final-pause
 * - It also shortens the concurrent cycle because we don't need to
 *   pointlessly traverse through newly allocated objects.
 * - As a nice side-effect, it solves the I-U termination problem (mutators
 *   cannot outrun the GC by allocating like crazy)
 * - It is an easy way to achieve MWF. What MWF does is to also enqueue the
 *   target object of stores if it's new. Treating new objects live implicitly
 *   achieves the same, but without extra barriers. I think the effect of
 *   shortened final-pause (mentioned above) is the main advantage of MWF. In
 *   particular, we will not see the head of a completely new long linked list
 *   in final-pause and end up traversing huge chunks of the heap there.
 * - We don't need to see/update the fields of new objects either, because they
 *   are either still null, or anything that's been stored into them has been
 *   evacuated+enqueued before (and will thus be treated later).
 *
 * We achieve this by setting TAMS for each region, and everything allocated
 * beyond TAMS will be 'implicitly marked'.
 *
 * Gotchas:
 * - While we want new objects to be implicitly marked, we don't want to count
 *   them alive. Otherwise the next cycle wouldn't pick them up and consider
 *   them for cset. This means that we need to protect such regions from
 *   getting accidentally thrashed at the end of traversal cycle. This is why I
 *   keep track of alloc-regions and check is_alloc_region() in the trashing
 *   code.
 * - We *need* to traverse through evacuated objects. Those objects are
 *   pre-existing, and any references in them point to interesting objects that
 *   we need to see.
We also want to count them as live, because we just 95 * determined that they are alive :-) I achieve this by upping TAMS 96 * concurrently for every gclab/gc-shared alloc before publishing the 97 * evacuated object. This way, the GC threads will not consider such objects 98 * implictely marked, and traverse through them as normal. 99 */ 100 class ShenandoahTraversalSATBBufferClosure : public SATBBufferClosure { 101 private: 102 ShenandoahObjToScanQueue* _queue; 103 ShenandoahTraversalGC* _traversal_gc; 104 ShenandoahHeap* const _heap; 105 106 public: 107 ShenandoahTraversalSATBBufferClosure(ShenandoahObjToScanQueue* q) : 108 _queue(q), 109 _heap(ShenandoahHeap::heap()) 110 { } 111 112 void do_buffer(void** buffer, size_t size) { 113 for (size_t i = 0; i < size; ++i) { 114 oop* p = (oop*) &buffer[i]; 115 oop obj = RawAccess<>::oop_load(p); 116 shenandoah_assert_not_forwarded(p, obj); 117 if (_heap->marking_context()->mark(obj)) { 118 _queue->push(ShenandoahMarkTask(obj)); 119 } 120 } 121 } 122 }; 123 124 class ShenandoahTraversalSATBThreadsClosure : public ThreadClosure { 125 private: 126 ShenandoahTraversalSATBBufferClosure* _satb_cl; 127 128 public: 129 ShenandoahTraversalSATBThreadsClosure(ShenandoahTraversalSATBBufferClosure* satb_cl) : 130 _satb_cl(satb_cl) {} 131 132 void do_thread(Thread* thread) { 133 ShenandoahThreadLocalData::satb_mark_queue(thread).apply_closure_and_empty(_satb_cl); 134 } 135 }; 136 137 // Like CLDToOopClosure, but clears has_modified_oops, so that we can record modified CLDs during traversal 138 // and remark them later during final-traversal. 
class ShenandoahMarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;
public:
  ShenandoahMarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    // NOTE(review): the two bool args forward to ClassLoaderData::oops_do;
    // presumably (claim, clear_modified_oops) — confirm against classLoaderData.hpp.
    cld->oops_do(_cl, true, true);
  }
};

// Like CLDToOopClosure, but only process modified CLDs
class ShenandoahRemarkCLDClosure : public CLDClosure {
private:
  OopClosure* _cl;
public:
  ShenandoahRemarkCLDClosure(OopClosure* cl) : _cl(cl) {}
  void do_cld(ClassLoaderData* cld) {
    // Only revisit CLDs whose modified flag was set since the initial scan.
    if (cld->has_modified_oops()) {
      cld->oops_do(_cl, true, true);
    }
  }
};

// STW worker task for the init-traversal pause: scans GC roots with
// ShenandoahTraversalClosure, seeding each worker's traversal queue.
class ShenandoahInitTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahHeap* _heap;
  ShenandoahCsetCodeRootsIterator* _cset_coderoots;  // code roots pointing into the cset
  ShenandoahStringDedupRoot _dedup_roots;

public:
  ShenandoahInitTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahCsetCodeRootsIterator* cset_coderoots) :
    AbstractGangTask("Shenandoah Init Traversal Collection"),
    _rp(rp),
    _heap(ShenandoahHeap::heap()),
    _cset_coderoots(cset_coderoots) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahObjToScanQueueSet* queues = _heap->traversal_gc()->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    // rp stays NULL when reference processing is off; the closures accept that.
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 1: Process ordinary GC roots.
    {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahMarkCLDClosure cld_cl(&roots_cl);
      MarkingCodeBlobClosure code_cl(&roots_cl, CodeBlobToOopClosure::FixRelocations);
      if (unload_classes) {
        _rp->strong_roots_do(worker_id, &roots_cl);
        // Need to pre-evac code roots here. Otherwise we might see from-space constants.
        ShenandoahWorkerTimings* worker_times = _heap->phase_timings()->worker_times();
        ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::CodeCacheRoots, worker_id);
        _cset_coderoots->possibly_parallel_blobs_do(&code_cl);
      } else {
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, &code_cl);
      }

      AlwaysTrueClosure is_alive;
      _dedup_roots.oops_do(&is_alive, &roots_cl, worker_id);
    }
  }
};

// Concurrent worker task: drains the traversal queues until termination.
// Joins the suspendible thread set so workers can yield to safepoints.
class ShenandoahConcurrentTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahConcurrentTraversalCollectionTask(ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Concurrent Traversal Collection"),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahConcurrentWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    // Drain all outstanding work in queues (sts_yield = true: may yield to safepoints).
    traversal_gc->main_loop(worker_id, _terminator, true);
  }
};

// STW worker task for the final-traversal pause: drains leftover SATB buffers,
// rescans roots (remark), then drains the traversal queues to completion.
class ShenandoahFinalTraversalCollectionTask : public AbstractGangTask {
private:
  ShenandoahRootProcessor* _rp;
  ShenandoahTaskTerminator* _terminator;
  ShenandoahHeap* _heap;
public:
  ShenandoahFinalTraversalCollectionTask(ShenandoahRootProcessor* rp, ShenandoahTaskTerminator* terminator) :
    AbstractGangTask("Shenandoah Final Traversal Collection"),
    _rp(rp),
    _terminator(terminator),
    _heap(ShenandoahHeap::heap()) {}

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);

    ShenandoahEvacOOMScope oom_evac_scope;
    ShenandoahTraversalGC* traversal_gc = _heap->traversal_gc();

    ShenandoahObjToScanQueueSet* queues = traversal_gc->task_queues();
    ShenandoahObjToScanQueue* q = queues->queue(worker_id);

    bool process_refs = _heap->process_references();
    bool unload_classes = _heap->unload_classes();
    ReferenceProcessor* rp = NULL;
    if (process_refs) {
      rp = _heap->ref_processor();
    }

    // Step 0: Drain outstanding SATB queues.
    // NOTE: we piggy-back draining of remaining thread SATB buffers on the final root scan below.
    ShenandoahTraversalSATBBufferClosure satb_cl(q);
    {
      // Process remaining finished SATB buffers.
      SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();
      while (satb_mq_set.apply_closure_to_completed_buffer(&satb_cl));
      // Process remaining threads SATB buffers below.
    }

    // Step 1: Process GC roots.
    // For oops in code roots, they are marked, evacuated, enqueued for further traversal,
    // and the references to the oops are updated during init pause. New nmethods are handled
    // in similar way during nmethod-register process. Therefore, we don't need to rescan code
    // roots here.
    // The degenerated path uses the *Degen closure variants; otherwise identical.
    if (!_heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    } else {
      ShenandoahTraversalDegenClosure roots_cl(q, rp);
      ShenandoahTraversalSATBThreadsClosure tc(&satb_cl);
      if (unload_classes) {
        ShenandoahRemarkCLDClosure remark_cld_cl(&roots_cl);
        _rp->strong_roots_do(worker_id, &roots_cl, &remark_cld_cl, NULL, &tc);
      } else {
        CLDToOopClosure cld_cl(&roots_cl, ClassLoaderData::_claim_strong);
        _rp->roots_do(worker_id, &roots_cl, &cld_cl, NULL, &tc);
      }
    }

    {
      ShenandoahWorkerTimings *worker_times = _heap->phase_timings()->worker_times();
      ShenandoahWorkerTimingsTracker timer(worker_times, ShenandoahPhaseTimings::FinishQueues, worker_id);

      // Step 3: Finally drain all outstanding work in queues.
      // (sts_yield = false: we are inside a pause and must not yield.)
      traversal_gc->main_loop(worker_id, _terminator, false);
    }

  }
};

// Constructor: builds one traversal queue per potential worker.
// NOTE(review): the num_regions parameter is unused here.
ShenandoahTraversalGC::ShenandoahTraversalGC(ShenandoahHeap* heap, size_t num_regions) :
  _heap(heap),
  _task_queues(new ShenandoahObjToScanQueueSet(heap->max_workers())),
  _traversal_set(ShenandoahHeapRegionSet()) {

  uint num_queues = heap->max_workers();
  for (uint i = 0; i < num_queues; ++i) {
    ShenandoahObjToScanQueue* task_queue = new ShenandoahObjToScanQueue();
    task_queue->initialize();
    _task_queues->register_queue(i, task_queue);
  }
}

ShenandoahTraversalGC::~ShenandoahTraversalGC() {
}

// Set TAMS for every region (see file-head comment): traversal-set regions
// capture TAMS so allocations beyond it are implicitly marked; everything
// else gets TAMS reset and is treated as always-live.
void ShenandoahTraversalGC::prepare_regions() {
  size_t num_regions = _heap->num_regions();
  ShenandoahMarkingContext* const ctx = _heap->marking_context();
  for (size_t i = 0; i < num_regions; i++) {
    ShenandoahHeapRegion* region = _heap->get_region(i);
    if (_heap->is_bitmap_slice_committed(region)) {
      if (_traversal_set.is_in(i)) {
        ctx->capture_top_at_mark_start(region);
        region->clear_live_data();
        assert(ctx->is_bitmap_clear_range(region->bottom(), region->end()), "bitmap for traversal regions must be cleared");
      } else {
        // Everything outside the traversal set is always considered live.
        ctx->reset_top_at_mark_start(region);
      }
    } else {
      // FreeSet may contain uncommitted empty regions, once they are recommitted,
      // their TAMS may have old values, so reset them here.
ctx->reset_top_at_mark_start(region);
    }
  }
}

// Choose the collection set and prime regions/free set for a traversal cycle.
// Called at a safepoint under the heap lock (see init_traversal_collection).
void ShenandoahTraversalGC::prepare() {
  _heap->collection_set()->clear();
  assert(_heap->collection_set()->count() == 0, "collection set not clear");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_make_parsable);
    _heap->make_parsable(true);
  }

  if (UseTLAB) {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::traversal_gc_resize_tlabs);
    _heap->resize_tlabs();
  }

  assert(_heap->marking_context()->is_bitmap_clear(), "need clean mark bitmap");
  assert(!_heap->marking_context()->is_complete(), "should not be complete");

  ShenandoahFreeSet* free_set = _heap->free_set();
  ShenandoahCollectionSet* collection_set = _heap->collection_set();

  // Find collection set
  _heap->heuristics()->choose_collection_set(collection_set);
  prepare_regions();

  // Rebuild free set
  free_set->rebuild();

  log_info(gc, ergo)("Collectable Garbage: " SIZE_FORMAT "M, " SIZE_FORMAT "M CSet, " SIZE_FORMAT " CSet regions",
                     collection_set->garbage() / M, collection_set->live_data() / M, collection_set->count());
}

// Init-traversal pause: verify, prepare cset/regions, enable reference
// discovery, and run the parallel root scan that seeds the queues.
void ShenandoahTraversalGC::init_traversal_collection() {
  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "STW traversal GC");

  if (ShenandoahVerify) {
    _heap->verifier()->verify_before_traversal();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase_prepare(ShenandoahPhaseTimings::traversal_gc_prepare);
    ShenandoahHeapLocker lock(_heap->lock());
    prepare();
  }

  _heap->set_concurrent_traversal_in_progress(true);

  bool process_refs = _heap->process_references();
  if (process_refs) {
    ReferenceProcessor* rp = _heap->ref_processor();
    rp->enable_discovery(true /*verify_no_refs*/);
    rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  }

  {
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::init_traversal_gc_work);
    assert(_task_queues->is_empty(), "queues must be empty before traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif

    {
      uint nworkers = _heap->workers()->active_workers();
      task_queues()->reserve(nworkers);
      ShenandoahRootProcessor rp(nworkers, ShenandoahPhaseTimings::init_traversal_gc_work);

      ShenandoahCsetCodeRootsIterator cset_coderoots = ShenandoahCodeRoots::cset_iterator();

      ShenandoahInitTraversalCollectionTask traversal_task(&rp, &cset_coderoots);
      _heap->workers()->run_task(&traversal_task);
    }

#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (ShenandoahPacing) {
    _heap->pacer()->setup_for_traversal();
  }
}

// Per-worker driver: selects the closure type statically from the
// (degenerated, unload_classes, string-dedup) flags so main_loop_work's
// hot loop runs with a concrete closure type, then flushes liveness data.
void ShenandoahTraversalGC::main_loop(uint w, ShenandoahTaskTerminator* t, bool sts_yield) {
  ShenandoahObjToScanQueue* q = task_queues()->queue(w);

  // Initialize live data.
  jushort* ld = _heap->get_liveness_cache(w);

  ReferenceProcessor* rp = NULL;
  if (_heap->process_references()) {
    rp = _heap->ref_processor();
  }
  {
    if (!_heap->is_degenerated_gc_in_progress()) {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    } else {
      if (_heap->unload_classes()) {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalMetadataDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalMetadataDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalMetadataDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      } else {
        if (ShenandoahStringDedup::is_enabled()) {
          ShenandoahTraversalDedupDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDedupDegenClosure>(&cl, ld, w, t, sts_yield);
        } else {
          ShenandoahTraversalDegenClosure cl(q, rp);
          main_loop_work<ShenandoahTraversalDegenClosure>(&cl, ld, w, t, sts_yield);
        }
      }
    }
  }

  _heap->flush_liveness_cache(w);
}

// Core traversal loop: first help drain any outstanding claimed queues,
// then alternate between SATB buffers, own queue, and stealing until the
// terminator agrees all workers are done (or GC is cancelled).
template <class T>
void ShenandoahTraversalGC::main_loop_work(T* cl, jushort* live_data, uint worker_id, ShenandoahTaskTerminator* terminator, bool sts_yield) {
  ShenandoahObjToScanQueueSet* queues = task_queues();
  ShenandoahObjToScanQueue* q
= queues->queue(worker_id);
  ShenandoahConcurrentMark* conc_mark = _heap->concurrent_mark();

  // Check for cancellation/yield only every 'stride' tasks to keep the
  // hot loop tight.
  uintx stride = ShenandoahMarkLoopStride;

  ShenandoahMarkTask task;

  // Process outstanding queues, if any.
  q = queues->claim_next();
  while (q != NULL) {
    if (_heap->check_cancelled_gc_and_yield(sts_yield)) {
      return;
    }

    for (uint i = 0; i < stride; i++) {
      if (q->pop(task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
      } else {
        assert(q->is_empty(), "Must be empty");
        q = queues->claim_next();
        break;
      }
    }
  }

  if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

  // Normal loop.
  q = queues->queue(worker_id);

  ShenandoahTraversalSATBBufferClosure drain_satb(q);
  SATBMarkQueueSet& satb_mq_set = ShenandoahBarrierSet::satb_mark_queue_set();

  while (true) {
    if (check_and_handle_cancelled_gc(terminator, sts_yield)) return;

    // Drain any completed SATB buffers first, so concurrently stored
    // values get marked and enqueued.
    while (satb_mq_set.completed_buffers_num() > 0) {
      satb_mq_set.apply_closure_to_completed_buffer(&drain_satb);
    }

    uint work = 0;
    for (uint i = 0; i < stride; i++) {
      if (q->pop(task) ||
          queues->steal(worker_id, task)) {
        conc_mark->do_task<T>(q, cl, live_data, &task);
        work++;
      } else {
        break;
      }
    }

    if (work == 0) {
      // No more work, try to terminate
      ShenandoahEvacOOMScopeLeaver oom_scope_leaver;
      ShenandoahSuspendibleThreadSetLeaver stsl(sts_yield && ShenandoahSuspendibleWorkers);
      ShenandoahTerminationTimingsTracker term_tracker(worker_id);
      ShenandoahTerminatorTerminator tt(_heap);

      if (terminator->offer_termination(&tt)) return;
    }
  }
}

// Returns true when the GC has been cancelled.
// NOTE(review): the terminator and sts_yield parameters are unused here.
bool ShenandoahTraversalGC::check_and_handle_cancelled_gc(ShenandoahTaskTerminator* terminator, bool sts_yield) {
  if (_heap->cancelled_gc()) {
    return true;
  }
  return false;
}

// Concurrent traversal phase: clear CLD claim marks, run the concurrent
// draining task, and optionally preclean weak references afterwards.
void ShenandoahTraversalGC::concurrent_traversal_collection() {
  {
    MutexLocker ml(ClassLoaderDataGraph_lock);
    ClassLoaderDataGraph::clear_claimed_marks();
  }

  ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::conc_traversal);
  if (!_heap->cancelled_gc()) {
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);
    ShenandoahTerminationTracker tracker(ShenandoahPhaseTimings::conc_traversal_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahConcurrentTraversalCollectionTask task(&terminator);
    _heap->workers()->run_task(&task);
  }

  if (!_heap->cancelled_gc() && ShenandoahPreclean && _heap->process_references()) {
    preclean_weak_refs();
  }
}

// Final-traversal pause: finish marking/evacuation, process weak refs,
// fix up roots, unload classes, then trash fully-dead regions and rebuild
// the free set. Each stage is skipped once GC has been cancelled.
void ShenandoahTraversalGC::final_traversal_collection() {
  _heap->make_parsable(true);

  if (!_heap->cancelled_gc()) {
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahGCPhase phase_work(ShenandoahPhaseTimings::final_traversal_gc_work);
    uint nworkers = _heap->workers()->active_workers();
    task_queues()->reserve(nworkers);

    // Finish traversal
    ShenandoahRootProcessor rp(nworkers, ShenandoahPhaseTimings::final_traversal_gc_work);
    ShenandoahTerminationTracker term(ShenandoahPhaseTimings::final_traversal_gc_termination);

    ShenandoahTaskTerminator terminator(nworkers, task_queues());
    ShenandoahFinalTraversalCollectionTask task(&rp, &terminator);
    _heap->workers()->run_task(&task);
#if defined(COMPILER2) || INCLUDE_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  if (!_heap->cancelled_gc() && _heap->process_references()) {
    weak_refs_work();
  }

  if (!_heap->cancelled_gc()) {
    fixup_roots();
    if (_heap->unload_classes()) {
      _heap->unload_classes_and_cleanup_tables(false);
    }
  }

  if (!_heap->cancelled_gc()) {
    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    TASKQUEUE_STATS_ONLY(_task_queues->print_taskqueue_stats());
    TASKQUEUE_STATS_ONLY(_task_queues->reset_taskqueue_stats());

    // No more marking expected
    _heap->mark_complete_marking_context();

    // Resize metaspace
    MetaspaceGC::compute_new_size();

    // Still good? We can now trash the cset, and make final verification
    {
      ShenandoahGCPhase phase_cleanup(ShenandoahPhaseTimings::traversal_gc_cleanup);
      ShenandoahHeapLocker lock(_heap->lock());

      // Trash everything
      // Clear immediate garbage regions.
      size_t num_regions = _heap->num_regions();

      ShenandoahHeapRegionSet* traversal_regions = traversal_set();
      ShenandoahFreeSet* free_regions = _heap->free_set();
      ShenandoahMarkingContext* const ctx = _heap->marking_context();
      free_regions->clear();
      for (size_t i = 0; i < num_regions; i++) {
        ShenandoahHeapRegion* r = _heap->get_region(i);
        // not_allocated: nothing was allocated past TAMS in this region, so
        // it holds no implicitly-live new objects (see file-head comment).
        bool not_allocated = ctx->top_at_mark_start(r) == r->top();

        bool candidate = traversal_regions->is_in(r) && !r->has_live() && not_allocated;
        if (r->is_humongous_start() && candidate) {
          // Trash humongous.
          HeapWord* humongous_obj = r->bottom() + ShenandoahForwarding::word_size();
          assert(!ctx->is_marked(oop(humongous_obj)), "must not be marked");
          r->make_trash_immediate();
          // Also trash all continuation regions of this humongous object.
          while (i + 1 < num_regions && _heap->get_region(i + 1)->is_humongous_continuation()) {
            i++;
            r = _heap->get_region(i);
            assert(r->is_humongous_continuation(), "must be humongous continuation");
            r->make_trash_immediate();
          }
        } else if (!r->is_empty() && candidate) {
          // Trash regular.
          assert(!r->is_humongous(), "handled above");
          assert(!r->is_trash(), "must not already be trashed");
          r->make_trash_immediate();
        }
      }
      _heap->collection_set()->clear();
      _heap->free_set()->rebuild();
      reset();
    }

    assert(_task_queues->is_empty(), "queues must be empty after traversal GC");
    _heap->set_concurrent_traversal_in_progress(false);
    assert(!_heap->cancelled_gc(), "must not be cancelled when getting out here");

    if (ShenandoahVerify) {
      _heap->verifier()->verify_after_traversal();
    }

    if (VerifyAfterGC) {
      Universe::verify();
    }
  }
}

// Root-fixup closure: replaces each root's reference to a forwarded object
// with the object's forwardee.
class ShenandoahTraversalFixRootsClosure : public OopClosure {
private:
  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      oop forw = ShenandoahBarrierSet::resolve_forwarded_not_null(obj);
      // Only store back when the object actually moved.
      if (!oopDesc::equals_raw(obj, forw)) {
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  inline void do_oop(oop* p) { do_oop_work(p); }
  inline void do_oop(narrowOop* p) { do_oop_work(p); }
};

// Parallel task applying ShenandoahTraversalFixRootsClosure to all roots.
class ShenandoahTraversalFixRootsTask : public AbstractGangTask {
private:
  ShenandoahRootUpdater* _rp;

public:
  ShenandoahTraversalFixRootsTask(ShenandoahRootUpdater* rp) :
    AbstractGangTask("Shenandoah traversal fix roots"),
    _rp(rp) {
    assert(ShenandoahHeap::heap()->has_forwarded_objects(), "Must be");
  }

  void work(uint worker_id) {
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahTraversalFixRootsClosure cl;
    ShenandoahForwardedIsAliveClosure is_alive;
    _rp->roots_do<ShenandoahForwardedIsAliveClosure, ShenandoahTraversalFixRootsClosure>(worker_id, &is_alive, &cl);
  }
};

// Update all root references to point at forwardees (including code cache).
void ShenandoahTraversalGC::fixup_roots() {
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::clear();
#endif
  ShenandoahRootUpdater rp(_heap->workers()->active_workers(), ShenandoahPhaseTimings::final_traversal_update_roots, true /* update code cache */);
  ShenandoahTraversalFixRootsTask update_roots_task(&rp);
  _heap->workers()->run_task(&update_roots_task);
#if defined(COMPILER2) || INCLUDE_JVMCI
  DerivedPointerTable::update_pointers();
#endif
}

// Discard any remaining queued work.
void ShenandoahTraversalGC::reset() {
  _task_queues->clear();
}

ShenandoahObjToScanQueueSet* ShenandoahTraversalGC::task_queues() {
  return _task_queues;
}

// Yield closure for reference precleaning: requests an early return once
// GC has been cancelled.
class ShenandoahTraversalCancelledGCYieldClosure : public YieldClosure {
private:
  ShenandoahHeap* const _heap;
public:
  ShenandoahTraversalCancelledGCYieldClosure() : _heap(ShenandoahHeap::heap()) {};
  virtual bool should_return() { return _heap->cancelled_gc(); }
};

// complete_gc closure for precleaning: drains the traversal queues with a
// single-threaded terminator (worker 0 only).
class ShenandoahTraversalPrecleanCompleteGCClosure : public VoidClosure {
public:
  void do_void() {
    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    ShenandoahTaskTerminator terminator(1, traversal_gc->task_queues());
    shenandoah_assert_rp_isalive_installed();
    traversal_gc->main_loop((uint) 0, &terminator, true);
  }
};

// keep_alive closure for reference processing: routes each oop through
// ShenandoahTraversalGC::process_oop (no string dedup, non-degenerated).
class ShenandoahTraversalKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// As above, but for the degenerated path (degen = true template flag).
class ShenandoahTraversalKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Single-threaded variant: additionally enters an evac-OOM scope around
// each oop (the multi-threaded callers already hold one).
class ShenandoahTraversalSingleThreadKeepAliveUpdateClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahEvacOOMScope evac_scope;
    _traversal_gc->process_oop<T, false /* string dedup */, false /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalSingleThreadKeepAliveUpdateClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Single-threaded, degenerated variant of the keep-alive closure.
class ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure : public OopClosure {
private:
  ShenandoahObjToScanQueue* _queue;
  Thread* _thread;
  ShenandoahTraversalGC* _traversal_gc;
  ShenandoahMarkingContext* const _mark_context;

  template <class T>
  inline void do_oop_work(T* p) {
    ShenandoahEvacOOMScope evac_scope;
    _traversal_gc->process_oop<T, false /* string dedup */, true /* degen */>(p, _thread, _queue, _mark_context);
  }

public:
  ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure(ShenandoahObjToScanQueue* q) :
    _queue(q), _thread(Thread::current()),
    _traversal_gc(ShenandoahHeap::heap()->traversal_gc()),
    _mark_context(ShenandoahHeap::heap()->marking_context()) {}

  void do_oop(narrowOop* p) { do_oop_work(p); }
  void do_oop(oop* p) { do_oop_work(p); }
};

// Single-worker task wrapping ReferenceProcessor::preclean_discovered_references,
// run in a worker thread so it gets GCLABs, dedup queues, etc.
class ShenandoahTraversalPrecleanTask : public AbstractGangTask {
private:
  ReferenceProcessor* _rp;

public:
  ShenandoahTraversalPrecleanTask(ReferenceProcessor* rp) :
    AbstractGangTask("Precleaning task"),
    _rp(rp) {}

  void work(uint worker_id) {
    assert(worker_id == 0, "The code below is single-threaded, only one worker is expected");
    ShenandoahParallelWorkerSession worker_session(worker_id);
    ShenandoahSuspendibleThreadSetJoiner stsj(ShenandoahSuspendibleWorkers);
    ShenandoahEvacOOMScope oom_evac_scope;

    ShenandoahHeap* sh = ShenandoahHeap::heap();

    ShenandoahObjToScanQueue* q = sh->traversal_gc()->task_queues()->queue(worker_id);

    ShenandoahForwardedIsAliveClosure is_alive;
    ShenandoahTraversalCancelledGCYieldClosure yield;
    ShenandoahTraversalPrecleanCompleteGCClosure complete_gc;
    ShenandoahTraversalKeepAliveUpdateClosure keep_alive(q);
    ResourceMark rm;
    _rp->preclean_discovered_references(&is_alive, &keep_alive,
                                        &complete_gc, &yield,
                                        NULL);
  }
};

void ShenandoahTraversalGC::preclean_weak_refs() {
  // Pre-cleaning weak references before diving into STW makes sense at the
This will filter out the references whose referents
  // are alive. Note that ReferenceProcessor already filters out these on reference
  // discovery, and the bulk of work is done here. This phase processes leftovers
  // that missed the initial filtering, i.e. when referent was marked alive after
  // reference was discovered by RP.

  assert(_heap->process_references(), "sanity");
  assert(!_heap->is_degenerated_gc_in_progress(), "must be in concurrent non-degenerated phase");

  // Shortcut if no references were discovered to avoid winding up threads.
  ReferenceProcessor* rp = _heap->ref_processor();
  if (!rp->has_discovered_references()) {
    return;
  }

  // Force single-threaded discovery for the duration of precleaning
  // (scoped mutator; prior setting presumably restored on scope exit — confirm).
  ReferenceProcessorMTDiscoveryMutator fix_mt_discovery(rp, false);

  // Install the traversal liveness closure into RP for the scope below.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  assert(task_queues()->is_empty(), "Should be empty");

  // Execute precleaning in the worker thread: it will give us GCLABs, String dedup
  // queues and other goodies. When upstream ReferenceProcessor starts supporting
  // parallel precleans, we can extend this to more threads.
  ShenandoahPushWorkerScope scope(_heap->workers(), 1, /* check_workers = */ false);   // pin exactly one worker

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();
  assert(nworkers == 1, "This code uses only a single worker");
  task_queues()->reserve(nworkers);

  ShenandoahTraversalPrecleanTask task(rp);
  workers->run_task(&task);

  // Queues must be drained, unless the GC got cancelled mid-preclean.
  assert(_heap->cancelled_gc() || task_queues()->is_empty(), "Should be empty");
}

// Weak Reference Closures

// complete_gc closure for the parallel reference-processing path: drains the
// traversal queues via main_loop(). ReferenceProcessor may invoke it several
// times, so the terminator can optionally be reset after each drain.
class ShenandoahTraversalDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;   // reset terminator after draining so RP can call us again

public:
  ShenandoahTraversalDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    shenandoah_assert_rp_isalive_installed();

    traversal_gc->main_loop(_worker_id, _terminator, false);

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

// Single-threaded variant of the drain closure: identical, except it enters
// an evac-OOM scope around main_loop() on this path.
class ShenandoahTraversalSingleThreadedDrainMarkingStackClosure: public VoidClosure {
  uint _worker_id;
  ShenandoahTaskTerminator* _terminator;
  bool _reset_terminator;   // reset terminator after draining so RP can call us again

public:
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure(uint worker_id, ShenandoahTaskTerminator* t, bool reset_terminator = false):
    _worker_id(worker_id),
    _terminator(t),
    _reset_terminator(reset_terminator) {
  }

  void do_void() {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* sh = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = sh->traversal_gc();
    assert(sh->process_references(), "why else would we be here?");
    shenandoah_assert_rp_isalive_installed();

    // Unlike the parallel drain closure, enter the evac-OOM scope here:
    // on this single-threaded path no enclosing gang task appears to set one up.
    ShenandoahEvacOOMScope evac_scope;
    traversal_gc->main_loop(_worker_id, _terminator, false);

    if (_reset_terminator) {
      _terminator->reset_for_reuse();
    }
  }
};

// STW entry point for weak-reference processing in traversal GC.
void ShenandoahTraversalGC::weak_refs_work() {
  assert(_heap->process_references(), "sanity");

  ShenandoahPhaseTimings::Phase phase_root = ShenandoahPhaseTimings::weakrefs;

  // Scoped timing for the whole weakrefs phase.
  ShenandoahGCPhase phase(phase_root);

  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE: We cannot shortcut on has_discovered_references() here, because
  // we will miss marking JNI Weak refs then, see implementation in
  // ReferenceProcessor::process_discovered_references.
  weak_refs_work_doit();

  rp->verify_no_references_recorded();
  assert(!rp->discovery_enabled(), "Post condition");

}

// Adapts a ReferenceProcessor ProcessTask to an AbstractGangTask, selecting
// the degen or non-degen keep-alive closure based on the current GC mode.
class ShenandoahTraversalRefProcTaskProxy : public AbstractGangTask {
private:
  AbstractRefProcTaskExecutor::ProcessTask& _proc_task;
  ShenandoahTaskTerminator* _terminator;

public:
  ShenandoahTraversalRefProcTaskProxy(AbstractRefProcTaskExecutor::ProcessTask& proc_task,
                                      ShenandoahTaskTerminator* t) :
    AbstractGangTask("Process reference objects in parallel"),
    _proc_task(proc_task),
    _terminator(t) {
  }

  void work(uint worker_id) {
    ShenandoahEvacOOMScope oom_evac_scope;   // covers all keep-alive/drain work done by this task
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalDrainMarkingStackClosure complete_gc(worker_id, _terminator);

    ShenandoahForwardedIsAliveClosure is_alive;
    if (!heap->is_degenerated_gc_in_progress()) {
      ShenandoahTraversalKeepAliveUpdateClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    } else {
      // Degenerated GC in progress: identical processing, degen keep-alive closure.
      ShenandoahTraversalKeepAliveUpdateDegenClosure keep_alive(heap->traversal_gc()->task_queues()->queue(worker_id));
      _proc_task.work(worker_id, is_alive, keep_alive, complete_gc);
    }
  }
};

// Executor handed to ReferenceProcessor for its MT processing path: attaches
// the traversal task queues to ergo_workers workers and runs the proxy task.
class ShenandoahTraversalRefProcTaskExecutor : public AbstractRefProcTaskExecutor {
private:
  WorkGang* _workers;

public:
  ShenandoahTraversalRefProcTaskExecutor(WorkGang* workers) : _workers(workers) {}

  // Executes a task using worker threads.
  void execute(ProcessTask& task, uint ergo_workers) {
    assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "Must be at a safepoint");

    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahTraversalGC* traversal_gc = heap->traversal_gc();
    ShenandoahPushWorkerQueuesScope scope(_workers,
                                          traversal_gc->task_queues(),
                                          ergo_workers,
                                          /* do_check = */ false);
    uint nworkers = _workers->active_workers();
    traversal_gc->task_queues()->reserve(nworkers);
    ShenandoahTaskTerminator terminator(nworkers, traversal_gc->task_queues());
    ShenandoahTraversalRefProcTaskProxy proc_task_proxy(task, &terminator);
    _workers->run_task(&proc_task_proxy);
  }
};

// Performs the actual reference processing: serial closures for RP's
// single-threaded path, plus an executor for the MT path.
void ShenandoahTraversalGC::weak_refs_work_doit() {
  ReferenceProcessor* rp = _heap->ref_processor();

  // NOTE(review): phase_process is declared but never referenced in this method —
  // confirm whether a ShenandoahGCPhase(phase_process) timing scope was intended.
  ShenandoahPhaseTimings::Phase phase_process = ShenandoahPhaseTimings::weakrefs_process;

  // Install the traversal liveness closure into RP for the scope below.
  shenandoah_assert_rp_isalive_not_installed();
  ShenandoahForwardedIsAliveClosure is_alive;
  ReferenceProcessorIsAliveMutator fix_isalive(rp, &is_alive);

  WorkGang* workers = _heap->workers();
  uint nworkers = workers->active_workers();

  // Soft-reference clearing follows the policy decision made for this cycle.
  rp->setup_policy(_heap->soft_ref_policy()->should_clear_all_soft_refs());
  rp->set_active_mt_degree(nworkers);

  assert(task_queues()->is_empty(), "Should be empty");

  // complete_gc and
keep_alive closures instantiated here are only needed for
  // single-threaded path in RP. They share the queue 0 for tracking work, which
  // simplifies implementation. Since RP may decide to call complete_gc several
  // times, we need to be able to reuse the terminator.
  uint serial_worker_id = 0;
  ShenandoahTaskTerminator terminator(1, task_queues());
  ShenandoahTraversalSingleThreadedDrainMarkingStackClosure complete_gc(serial_worker_id, &terminator, /* reset_terminator = */ true);
  ShenandoahPushWorkerQueuesScope scope(workers, task_queues(), 1, /* do_check = */ false);

  ShenandoahTraversalRefProcTaskExecutor executor(workers);

  ReferenceProcessorPhaseTimes pt(_heap->gc_timer(), rp->num_queues());
  // The two branches differ only in the keep-alive closure: degen variant when
  // a degenerated GC is in progress, regular single-thread variant otherwise.
  if (!_heap->is_degenerated_gc_in_progress()) {
    ShenandoahTraversalSingleThreadKeepAliveUpdateClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  } else {
    ShenandoahTraversalSingleThreadKeepAliveUpdateDegenClosure keep_alive(task_queues()->queue(serial_worker_id));
    rp->process_discovered_references(&is_alive, &keep_alive,
                                      &complete_gc, &executor,
                                      &pt);
  }

  pt.print_all_references();
  // Queues should be drained, unless the GC got cancelled during processing.
  assert(task_queues()->is_empty() || _heap->cancelled_gc(), "Should be empty");
}