/*
 * Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved.
 *
 * This code is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 only, as
 * published by the Free Software Foundation.
 *
 * This code is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * version 2 for more details (a copy is included in the LICENSE file that
 * accompanied this code).
 *
 * You should have received a copy of the GNU General Public License version
 * 2 along with this work; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA
 * or visit www.oracle.com if you need additional information or have any
 * questions.
 *
 */

#include "precompiled.hpp"

#include "code/codeCache.hpp"
#include "gc/shared/gcTraceTime.inline.hpp"
#include "gc/shared/preservedMarks.inline.hpp"
#include "gc/shared/workgroup.hpp"
#include "gc/shenandoah/shenandoahForwarding.inline.hpp"
#include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp"
#include "gc/shenandoah/shenandoahCollectionSet.hpp"
#include "gc/shenandoah/shenandoahFreeSet.hpp"
#include "gc/shenandoah/shenandoahPhaseTimings.hpp"
#include "gc/shenandoah/shenandoahMarkCompact.hpp"
#include "gc/shenandoah/shenandoahHeapRegionSet.hpp"
#include "gc/shenandoah/shenandoahHeap.inline.hpp"
#include "gc/shenandoah/shenandoahHeapRegion.inline.hpp"
#include "gc/shenandoah/shenandoahMarkingContext.inline.hpp"
#include "gc/shenandoah/shenandoahRootProcessor.inline.hpp"
#include "gc/shenandoah/shenandoahTaskqueue.inline.hpp"
#include "gc/shenandoah/shenandoahUtils.hpp"
#include "gc/shenandoah/shenandoahVerifier.hpp"
#include "gc/shenandoah/shenandoahVMOperations.hpp"
#include "gc/shenandoah/shenandoahWorkerPolicy.hpp"
#include "gc/shenandoah/heuristics/shenandoahHeuristics.hpp"
#include "memory/metaspace.hpp"
#include "oops/oop.inline.hpp"
#include "runtime/biasedLocking.hpp"
#include "runtime/thread.hpp"
#include "utilities/copy.hpp"
#include "utilities/growableArray.hpp"

ShenandoahMarkCompact::ShenandoahMarkCompact() :
  _gc_timer(NULL),
  _preserved_marks(new PreservedMarksSet(true)) {}

void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) {
  _gc_timer = gc_timer;
}

void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  if (ShenandoahVerify) {
    heap->verifier()->verify_before_fullgc();
  }

  if (VerifyBeforeGC) {
    Universe::verify();
  }

  heap->set_full_gc_in_progress(true);

  assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint");
  assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped");

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_pre);
    heap->pre_full_gc_dump(_gc_timer);
  }

  {
    ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare);
    // Full GC is supposed to recover from any GC state:

    // a0. Remember if we have forwarded objects
    bool has_forwarded_objects = heap->has_forwarded_objects();

    // a1. Cancel evacuation, if in progress
    if (heap->is_evacuation_in_progress()) {
      heap->set_evacuation_in_progress(false);
    }
    assert(!heap->is_evacuation_in_progress(), "sanity");

    // a2. Cancel update-refs, if in progress
    if (heap->is_update_refs_in_progress()) {
      heap->set_update_refs_in_progress(false);
    }
    assert(!heap->is_update_refs_in_progress(), "sanity");

    // b. Cancel concurrent mark, if in progress
    if (heap->is_concurrent_mark_in_progress()) {
      heap->concurrent_mark()->cancel();
      heap->set_concurrent_mark_in_progress(false);
    }
    assert(!heap->is_concurrent_mark_in_progress(), "sanity");

    // c. Reset the bitmaps for new marking
    heap->reset_mark_bitmap();
    assert(heap->marking_context()->is_bitmap_clear(), "sanity");
    assert(!heap->marking_context()->is_complete(), "sanity");

    // d. Abandon reference discovery and clear all discovered references.
    ReferenceProcessor* rp = heap->ref_processor();
    rp->disable_discovery();
    rp->abandon_partial_discovery();
    rp->verify_no_references_recorded();

    // e. Set the forwarded objects bit back, in case some steps above dropped it.
    heap->set_has_forwarded_objects(has_forwarded_objects);

    // f. Sync pinned region status from the CP marks
    heap->sync_pinned_region_status();

    // The rest of prologue:
    BiasedLocking::preserve_marks();
    _preserved_marks->init(heap->workers()->active_workers());
  }

  heap->make_parsable(true);

  CodeCache::gc_prologue();

  OrderAccess::fence();

  phase1_mark_heap();

  // Once marking is done, which may have fixed up forwarded objects, we can drop the flag.
  // Coming out of Full GC, we would not have any forwarded objects.
  // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3.
  heap->set_has_forwarded_objects(false);

  heap->set_full_gc_move_in_progress(true);

  // Set up workers for the rest
  OrderAccess::fence();

  // Initialize worker slices
  ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC);
  for (uint i = 0; i < heap->max_workers(); i++) {
    worker_slices[i] = new ShenandoahHeapRegionSet();
  }
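
  // Each worker gets its own region slice: distribute_slices() (called from phase2)
  // fills these with candidate regions, and phases 2 and 4 then compact each slice
  // independently, which lets the moving phases run in parallel without cross-worker
  // synchronization on regions.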

  {
    // The rest of the code performs region moves, where region status is undefined
    // until all phases run together.
    ShenandoahHeapLocker lock(heap->lock());

    phase2_calculate_target_addresses(worker_slices);

    OrderAccess::fence();

    phase3_update_references();

    phase4_compact_objects(worker_slices);
  }

  {
    // Epilogue
    SharedRestorePreservedMarksTaskExecutor exec(heap->workers());
    _preserved_marks->restore(&exec);
    BiasedLocking::restore_marks();
    _preserved_marks->reclaim();

    CodeCache::gc_epilogue();
  }

  // Resize metaspace
  MetaspaceGC::compute_new_size();

  // Free worker slices
  for (uint i = 0; i < heap->max_workers(); i++) {
    delete worker_slices[i];
  }
  FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices);

  heap->set_full_gc_move_in_progress(false);
  heap->set_full_gc_in_progress(false);

  if (ShenandoahVerify) {
    heap->verifier()->verify_after_fullgc();
  }

  if (VerifyAfterGC) {
    Universe::verify();
  }

  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdump_post);
    heap->post_full_gc_dump(_gc_timer);
  }
}

class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahMarkingContext* const _ctx;

public:
  ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {}

  void heap_region_do(ShenandoahHeapRegion *r) {
    _ctx->capture_top_at_mark_start(r);
    r->clear_live_data();
  }
};

void ShenandoahMarkCompact::phase1_mark_heap() {
  GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer);
  ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  ShenandoahPrepareForMarkClosure cl;
  heap->heap_region_iterate(&cl);

  ShenandoahConcurrentMark* cm = heap->concurrent_mark();

  heap->set_process_references(heap->heuristics()->can_process_references());
  heap->set_unload_classes(heap->heuristics()->can_unload_classes());

  ReferenceProcessor* rp = heap->ref_processor();
  // enable ("weak") refs discovery
  rp->enable_discovery(true /*verify_no_refs*/);
  rp->setup_policy(true); // forcefully purge all soft references
  rp->set_active_mt_degree(heap->workers()->active_workers());

  cm->update_roots(ShenandoahPhaseTimings::full_gc_update_roots);
  cm->mark_roots(ShenandoahPhaseTimings::full_gc_scan_roots);
  cm->finish_mark_from_roots(/* full_gc = */ true);
  heap->mark_complete_marking_context();
  heap->parallel_cleaning(true /* full_gc */);
}

class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure {
private:
  PreservedMarks* const _preserved_marks;
  ShenandoahHeap* const _heap;
  GrowableArray<ShenandoahHeapRegion*>& _empty_regions;
  int _empty_regions_pos;
  ShenandoahHeapRegion* _to_region;
  ShenandoahHeapRegion* _from_region;
  HeapWord* _compact_point;

public:
  ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks,
                                              GrowableArray<ShenandoahHeapRegion*>& empty_regions,
                                              ShenandoahHeapRegion* to_region) :
    _preserved_marks(preserved_marks),
    _heap(ShenandoahHeap::heap()),
    _empty_regions(empty_regions),
    _empty_regions_pos(0),
    _to_region(to_region),
    _from_region(NULL),
    _compact_point(to_region->bottom()) {}

  void set_from_region(ShenandoahHeapRegion* from_region) {
    _from_region = from_region;
  }
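
  // Record the current compaction point as the to-region's new top. Called when
  // the current to-region fills up, and once more at the end of the slice walk,
  // so that every to-region ends up with an up-to-date new_top.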
"should not happen"); 273 _to_region->set_new_top(_compact_point); 274 } 275 276 bool is_compact_same_region() { 277 return _from_region == _to_region; 278 } 279 280 int empty_regions_pos() { 281 return _empty_regions_pos; 282 } 283 284 void do_object(oop p) { 285 assert(_from_region != NULL, "must set before work"); 286 assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); 287 assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked"); 288 289 size_t obj_size = p->size(); 290 if (_compact_point + obj_size > _to_region->end()) { 291 finish_region(); 292 293 // Object doesn't fit. Pick next empty region and start compacting there. 294 ShenandoahHeapRegion* new_to_region; 295 if (_empty_regions_pos < _empty_regions.length()) { 296 new_to_region = _empty_regions.at(_empty_regions_pos); 297 _empty_regions_pos++; 298 } else { 299 // Out of empty region? Compact within the same region. 300 new_to_region = _from_region; 301 } 302 303 assert(new_to_region != _to_region, "must not reuse same to-region"); 304 assert(new_to_region != NULL, "must not be NULL"); 305 _to_region = new_to_region; 306 _compact_point = _to_region->bottom(); 307 } 308 309 // Object fits into current region, record new location: 310 assert(_compact_point + obj_size <= _to_region->end(), "must fit"); 311 shenandoah_assert_not_forwarded(NULL, p); 312 _preserved_marks->push_if_necessary(p, p->mark_raw()); 313 p->forward_to(oop(_compact_point)); 314 _compact_point += obj_size; 315 } 316 }; 317 318 class ShenandoahPrepareForCompactionTask : public AbstractGangTask { 319 private: 320 PreservedMarksSet* const _preserved_marks; 321 ShenandoahHeap* const _heap; 322 ShenandoahHeapRegionSet** const _worker_slices; 323 324 public: 325 ShenandoahPrepareForCompactionTask(PreservedMarksSet *preserved_marks, ShenandoahHeapRegionSet **worker_slices) : 326 AbstractGangTask("Shenandoah Prepare For Compaction Task"), 327 _preserved_marks(preserved_marks), 328 _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) { 329 } 330 331 static bool is_candidate_region(ShenandoahHeapRegion* r) { 332 // Empty region: get it into the slice to defragment the slice itself. 333 // We could have skipped this without violating correctness, but we really 334 // want to compact all live regions to the start of the heap, which sometimes 335 // means moving them into the fully empty regions. 336 if (r->is_empty()) return true; 337 338 // Can move the region, and this is not the humongous region. Humongous 339 // moves are special cased here, because their moves are handled separately. 340 return r->is_stw_move_allowed() && !r->is_humongous(); 341 } 342 343 void work(uint worker_id) { 344 ShenandoahHeapRegionSet* slice = _worker_slices[worker_id]; 345 ShenandoahHeapRegionSetIterator it(slice); 346 ShenandoahHeapRegion* from_region = it.next(); 347 // No work? 348 if (from_region == NULL) { 349 return; 350 } 351 352 // Sliding compaction. Walk all regions in the slice, and compact them. 353 // Remember empty regions and reuse them as needed. 
    ResourceMark rm;

    GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions());

    ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region);

    while (from_region != NULL) {
      assert(is_candidate_region(from_region), "Sanity");

      cl.set_from_region(from_region);
      if (from_region->has_live()) {
        _heap->marked_object_iterate(from_region, &cl);
      }

      // Compacted the region to somewhere else? From-region is empty then.
      if (!cl.is_compact_same_region()) {
        empty_regions.append(from_region);
      }
      from_region = it.next();
    }
    cl.finish_region();

    // Mark all remaining regions as empty
    for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) {
      ShenandoahHeapRegion* r = empty_regions.at(pos);
      r->set_new_top(r->bottom());
    }
  }
};

void ShenandoahMarkCompact::calculate_target_humongous_objects() {
  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compute the new addresses for humongous objects. We need to do this after addresses
  // for regular objects are calculated, and we know what regions in the heap suffix are
  // available for humongous moves.
  //
  // Scan the heap backwards, because we are compacting humongous regions towards the end.
  // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide
  // a humongous start region there.
  //
  // The complication is potential non-movable regions during the scan. If such a region is
  // detected, then sliding restarts towards that non-movable region.

  size_t to_begin = heap->num_regions();
  size_t to_end = heap->num_regions();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion *r = heap->get_region(c - 1);
    if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) {
      // To-region candidate: record this, and continue scan
      to_begin = r->index();
      continue;
    }

    if (r->is_humongous_start() && r->is_stw_move_allowed()) {
      // From-region candidate: movable humongous region
      oop old_obj = oop(r->bottom());
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t start = to_end - num_regions;

      if (start >= to_begin && start != r->index()) {
        // Fits into current window, and the move is non-trivial. Record the move then, and continue scan.
        _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark_raw());
        old_obj->forward_to(oop(heap->get_region(start)->bottom()));
        to_end = start;
        continue;
      }
    }

    // Failed to fit. Scan starting from current region.
    to_begin = r->index();
    to_end = r->index();
  }
}

class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;

public:
  ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {}
  void heap_region_do(ShenandoahHeapRegion* r) {
    if (r->is_trash()) {
      r->recycle();
    }
    if (r->is_cset()) {
      r->make_regular_bypass();
    }
    if (r->is_empty_uncommitted()) {
      r->make_committed_bypass();
    }
    assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->index());

    // Record current region occupancy: this communicates to the rest of the
    // Full GC code that empty regions are free.
452 r->set_new_top(r->top()); 453 } 454 }; 455 456 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure { 457 private: 458 ShenandoahHeap* const _heap; 459 ShenandoahMarkingContext* const _ctx; 460 461 public: 462 ShenandoahTrashImmediateGarbageClosure() : 463 _heap(ShenandoahHeap::heap()), 464 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {} 465 466 void heap_region_do(ShenandoahHeapRegion* r) { 467 if (r->is_humongous_start()) { 468 oop humongous_obj = oop(r->bottom()); 469 if (!_ctx->is_marked(humongous_obj)) { 470 assert(!r->has_live(), 471 "Region " SIZE_FORMAT " is not marked, should not have live", r->index()); 472 _heap->trash_humongous_region_at(r); 473 } else { 474 assert(r->has_live(), 475 "Region " SIZE_FORMAT " should have live", r->index()); 476 } 477 } else if (r->is_humongous_continuation()) { 478 // If we hit continuation, the non-live humongous starts should have been trashed already 479 assert(r->humongous_start_region()->has_live(), 480 "Region " SIZE_FORMAT " should have live", r->index()); 481 } else if (r->is_regular()) { 482 if (!r->has_live()) { 483 r->make_trash_immediate(); 484 } 485 } 486 } 487 }; 488 489 void ShenandoahMarkCompact::distribute_slices(ShenandoahHeapRegionSet** worker_slices) { 490 ShenandoahHeap* heap = ShenandoahHeap::heap(); 491 492 uint n_workers = heap->workers()->active_workers(); 493 size_t n_regions = heap->num_regions(); 494 495 // What we want to accomplish: have the dense prefix of data, while still balancing 496 // out the parallel work. 497 // 498 // Assuming the amount of work is driven by the live data that needs moving, we can slice 499 // the entire heap into equal-live-sized prefix slices, and compact into them. So, each 500 // thread takes all regions in its prefix subset, and then it takes some regions from 501 // the tail. 502 // 503 // Tail region selection becomes interesting. 504 // 505 // First, we want to distribute the regions fairly between the workers, and those regions 506 // might have different amount of live data. So, until we sure no workers need live data, 507 // we need to only take what the worker needs. 508 // 509 // Second, since we slide everything to the left in each slice, the most busy regions 510 // would be the ones on the left. Which means we want to have all workers have their after-tail 511 // regions as close to the left as possible. 512 // 513 // The easiest way to do this is to distribute after-tail regions in round-robin between 514 // workers that still need live data. 515 // 516 // Consider parallel workers A, B, C, then the target slice layout would be: 517 // 518 // AAAAAAAABBBBBBBBCCCCCCCC|ABCABCABCABCABCABCABCABABABABABABABABABABAAAAA 519 // 520 // (.....dense-prefix.....) (.....................tail...................) 521 // [all regions fully live] [left-most regions are fuller that right-most] 522 // 523 524 // Compute how much live data is there. This would approximate the size of dense prefix 525 // we target to create. 526 size_t total_live = 0; 527 for (size_t idx = 0; idx < n_regions; idx++) { 528 ShenandoahHeapRegion *r = heap->get_region(idx); 529 if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) { 530 total_live += r->get_live_data_words(); 531 } 532 } 533 534 // Estimate the size for the dense prefix. Note that we specifically count only the 535 // "full" regions, so there would be some non-full regions in the slice tail. 
  size_t live_per_worker = total_live / n_workers;
  size_t prefix_regions_per_worker = live_per_worker / ShenandoahHeapRegion::region_size_words();
  size_t prefix_regions_total = prefix_regions_per_worker * n_workers;
  prefix_regions_total = MIN2(prefix_regions_total, n_regions);
  assert(prefix_regions_total <= n_regions, "Sanity");

  // There might be non-candidate regions in the prefix. To compute where the tail actually
  // ends up being, we need to account for those as well.
  size_t prefix_end = prefix_regions_total;
  for (size_t idx = 0; idx < prefix_regions_total; idx++) {
    ShenandoahHeapRegion *r = heap->get_region(idx);
    if (!ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      prefix_end++;
    }
  }
  prefix_end = MIN2(prefix_end, n_regions);
  assert(prefix_end <= n_regions, "Sanity");

  // Distribute prefix regions per worker: each thread definitely gets its own same-sized
  // subset of the dense prefix.
  size_t prefix_idx = 0;

  size_t* live = NEW_C_HEAP_ARRAY(size_t, n_workers, mtGC);

  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSet* slice = worker_slices[wid];

    live[wid] = 0;
    size_t regs = 0;

    // Add all prefix regions for this worker
    while (prefix_idx < prefix_end && regs < prefix_regions_per_worker) {
      ShenandoahHeapRegion *r = heap->get_region(prefix_idx);
      if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
        slice->add_region(r);
        live[wid] += r->get_live_data_words();
        regs++;
      }
      prefix_idx++;
    }
  }

  // Distribute the tail among workers in round-robin fashion.
  size_t wid = n_workers - 1;

  for (size_t tail_idx = prefix_end; tail_idx < n_regions; tail_idx++) {
    ShenandoahHeapRegion *r = heap->get_region(tail_idx);
    if (ShenandoahPrepareForCompactionTask::is_candidate_region(r)) {
      assert(wid < n_workers, "Sanity");

      size_t live_region = r->get_live_data_words();

      // Select next worker that still needs live data.
      size_t old_wid = wid;
      do {
        wid++;
        if (wid == n_workers) wid = 0;
      } while (live[wid] + live_region >= live_per_worker && old_wid != wid);

      if (old_wid == wid) {
        // Circled back to the same worker? This means liveness data was
        // miscalculated. Bump the live_per_worker limit so that
        // everyone gets a piece of the leftover work.
        live_per_worker += ShenandoahHeapRegion::region_size_words();
      }

      worker_slices[wid]->add_region(r);
      live[wid] += live_region;
    }
  }

  FREE_C_HEAP_ARRAY(size_t, live);

#ifdef ASSERT
  ResourceBitMap map(n_regions);
  for (size_t wid = 0; wid < n_workers; wid++) {
    ShenandoahHeapRegionSetIterator it(worker_slices[wid]);
    ShenandoahHeapRegion* r = it.next();
    while (r != NULL) {
      size_t idx = r->index();
      assert(ShenandoahPrepareForCompactionTask::is_candidate_region(r), "Sanity: " SIZE_FORMAT, idx);
      assert(!map.at(idx), "No region distributed twice: " SIZE_FORMAT, idx);
      map.at_put(idx, true);
      r = it.next();
    }
  }

  for (size_t rid = 0; rid < n_regions; rid++) {
    bool is_candidate = ShenandoahPrepareForCompactionTask::is_candidate_region(heap->get_region(rid));
    bool is_distributed = map.at(rid);
    assert(is_distributed || !is_candidate, "All candidates are distributed: " SIZE_FORMAT, rid);
  }
#endif
}

void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer);
  ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // About to figure out which regions can be compacted, make sure pinning status
  // has been updated in the GC prologue.
  heap->assert_pinned_region_status();

  {
    // Trash the immediately collectible regions before computing addresses
    ShenandoahTrashImmediateGarbageClosure tigcl;
    heap->heap_region_iterate(&tigcl);

    // Make sure regions are in good state: committed, active, clean.
    // This is needed because we are potentially sliding the data through them.
    ShenandoahEnsureHeapActiveClosure ecl;
    heap->heap_region_iterate(&ecl);
  }

  // Compute the new addresses for regular objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular);

    distribute_slices(worker_slices);

    ShenandoahPrepareForCompactionTask task(_preserved_marks, worker_slices);
    heap->workers()->run_task(&task);
  }

  // Compute the new addresses for humongous objects
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong);
    calculate_target_humongous_objects();
  }
}

// Adjusts a single reference: if the referent has been forwarded during phase 2,
// store the forwardee back into the slot.
class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahMarkingContext* const _ctx;

  template <class T>
  inline void do_oop_work(T* p) {
    T o = RawAccess<>::oop_load(p);
    if (!CompressedOops::is_null(o)) {
      oop obj = CompressedOops::decode_not_null(o);
      assert(_ctx->is_marked(obj), "must be marked");
      if (obj->is_forwarded()) {
        oop forw = obj->forwardee();
        RawAccess<IS_NOT_NULL>::oop_store(p, forw);
      }
    }
  }

public:
  ShenandoahAdjustPointersClosure() :
    _heap(ShenandoahHeap::heap()),
    _ctx(ShenandoahHeap::heap()->complete_marking_context()) {}

  void do_oop(oop* p)       { do_oop_work(p); }
  void do_oop(narrowOop* p) { do_oop_work(p); }
};

class ShenandoahAdjustPointersObjectClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  ShenandoahAdjustPointersClosure _cl;

public:
  ShenandoahAdjustPointersObjectClosure() :
    _heap(ShenandoahHeap::heap()) {
  }
  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    p->oop_iterate(&_cl);
  }
};

class ShenandoahAdjustPointersTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahRegionIterator _regions;

public:
  ShenandoahAdjustPointersTask() :
    AbstractGangTask("Shenandoah Adjust Pointers Task"),
    _heap(ShenandoahHeap::heap()) {
  }

  void work(uint worker_id) {
    ShenandoahAdjustPointersObjectClosure obj_cl;
    ShenandoahHeapRegion* r = _regions.next();
    while (r != NULL) {
      if (!r->is_humongous_continuation() && r->has_live()) {
        _heap->marked_object_iterate(r, &obj_cl);
      }
      r = _regions.next();
    }
  }
};

class ShenandoahAdjustRootPointersTask : public AbstractGangTask {
private:
  ShenandoahRootAdjuster* _rp;
  PreservedMarksSet* _preserved_marks;
public:
  ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) :
    AbstractGangTask("Shenandoah Adjust Root Pointers Task"),
    _rp(rp),
    _preserved_marks(preserved_marks) {}

  void work(uint worker_id) {
    ShenandoahAdjustPointersClosure cl;
    _rp->roots_do(worker_id, &cl);
    _preserved_marks->get(worker_id)->adjust_during_full_gc();
  }
};

void ShenandoahMarkCompact::phase3_update_references() {
  GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer);
  ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  WorkGang* workers = heap->workers();
  uint nworkers = workers->active_workers();
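
  // Adjust pointers in GC roots first, then in all live heap objects. Both passes
  // rewrite each reference to the forwardee installed during phase 2; the objects
  // themselves do not move until phase 4.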
  {
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::clear();
#endif
    ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_adjust_roots);
    ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks);
    workers->run_task(&task);
#if COMPILER2_OR_JVMCI
    DerivedPointerTable::update_pointers();
#endif
  }

  ShenandoahAdjustPointersTask adjust_pointers_task;
  workers->run_task(&adjust_pointers_task);
}

class ShenandoahCompactObjectsClosure : public ObjectClosure {
private:
  ShenandoahHeap* const _heap;
  uint const _worker_id;

public:
  ShenandoahCompactObjectsClosure(uint worker_id) :
    _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {}

  void do_object(oop p) {
    assert(_heap->complete_marking_context()->is_marked(p), "must be marked");
    size_t size = (size_t)p->size();
    if (p->is_forwarded()) {
      HeapWord* compact_from = (HeapWord*) p;
      HeapWord* compact_to = (HeapWord*) p->forwardee();
      Copy::aligned_conjoint_words(compact_from, compact_to, size);
      oop new_obj = oop(compact_to);
      new_obj->init_mark_raw();
    }
  }
};

class ShenandoahCompactObjectsTask : public AbstractGangTask {
private:
  ShenandoahHeap* const _heap;
  ShenandoahHeapRegionSet** const _worker_slices;

public:
  ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) :
    AbstractGangTask("Shenandoah Compact Objects Task"),
    _heap(ShenandoahHeap::heap()),
    _worker_slices(worker_slices) {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]);

    ShenandoahCompactObjectsClosure cl(worker_id);
    ShenandoahHeapRegion* r = slice.next();
    while (r != NULL) {
      assert(!r->is_humongous(), "must not get humongous regions here");
      if (r->has_live()) {
        _heap->marked_object_iterate(r, &cl);
      }
      r->set_top(r->new_top());
      r = slice.next();
    }
  }
};

class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure {
private:
  ShenandoahHeap* const _heap;
  size_t _live;

public:
  ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) {
    _heap->free_set()->clear();
  }

  void heap_region_do(ShenandoahHeapRegion* r) {
    assert (!r->is_cset(), "cset regions should have been demoted already");

    // Need to reset the complete-top-at-mark-start pointer here because
    // the complete marking bitmap is no longer valid. This ensures
    // size-based iteration in marked_object_iterate().
    // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip
    // pinned regions.
    if (!r->is_pinned()) {
      _heap->complete_marking_context()->reset_top_at_mark_start(r);
    }

    size_t live = r->used();

    // Make empty regions that have been allocated into regular
    if (r->is_empty() && live > 0) {
      r->make_regular_bypass();
    }

    // Reclaim regular regions that became empty
    if (r->is_regular() && live == 0) {
      r->make_trash();
    }

    // Recycle all trash regions
    if (r->is_trash()) {
      live = 0;
      r->recycle();
    }

    r->set_live_data(live);
    r->reset_alloc_metadata();
    _live += live;
  }

  size_t get_live() {
    return _live;
  }
};
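
// Example (illustrative): a humongous object spanning regions 10..12 that was
// forwarded to region 4 in phase 2 is copied below as three full regions of
// words, and regions 4..6 are then re-tagged as humongous start/continuation.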
void ShenandoahMarkCompact::compact_humongous_objects() {
  // Compact humongous regions, based on their fwdptr objects.
  //
  // This code is serial, because doing the in-slice parallel sliding is tricky.
  // In most cases, humongous regions are already compacted and do not require
  // further moves, which alleviates sliding costs. We may consider doing this
  // in parallel in the future.

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  for (size_t c = heap->num_regions(); c > 0; c--) {
    ShenandoahHeapRegion* r = heap->get_region(c - 1);
    if (r->is_humongous_start()) {
      oop old_obj = oop(r->bottom());
      if (!old_obj->is_forwarded()) {
        // No need to move the object, it stays at the same slot
        continue;
      }
      size_t words_size = old_obj->size();
      size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize);

      size_t old_start = r->index();
      size_t old_end   = old_start + num_regions - 1;
      size_t new_start = heap->heap_region_index_containing(old_obj->forwardee());
      size_t new_end   = new_start + num_regions - 1;
      assert(old_start != new_start, "must be real move");
      assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->index());

      Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(),
                                   heap->get_region(new_start)->bottom(),
                                   ShenandoahHeapRegion::region_size_words()*num_regions);

      oop new_obj = oop(heap->get_region(new_start)->bottom());
      new_obj->init_mark_raw();

      {
        for (size_t c = old_start; c <= old_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          r->make_regular_bypass();
          r->set_top(r->bottom());
        }

        for (size_t c = new_start; c <= new_end; c++) {
          ShenandoahHeapRegion* r = heap->get_region(c);
          if (c == new_start) {
            r->make_humongous_start_bypass();
          } else {
            r->make_humongous_cont_bypass();
          }

          // Trailing region may be non-full, record the remainder there
          size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask();
          if ((c == new_end) && (remainder != 0)) {
            r->set_top(r->bottom() + remainder);
          } else {
            r->set_top(r->end());
          }

          r->reset_alloc_metadata();
        }
      }
    }
  }
}

// This is slightly different from ShenandoahHeap::reset_next_mark_bitmap:
// we need to remain able to walk pinned regions.
// Since pinned regions do not move and don't get compacted, we will get holes with
// unreachable objects in them (which may have pointers to unloaded Klasses and thus
// cannot be iterated over using oop->size()). The only way to safely iterate over those
// is using a valid marking bitmap and a valid TAMS pointer. This class only resets marking
// bitmaps for unpinned regions, and later we only reset TAMS for unpinned regions.
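// Put differently: a pinned region keeps both its bitmap and its TAMS, so
// marked_object_iterate() can still skip the dead "holes" in it; every other
// region gets a clean bitmap for the next marking cycle.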
class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask {
private:
  ShenandoahRegionIterator _regions;

public:
  ShenandoahMCResetCompleteBitmapTask() :
    AbstractGangTask("Parallel Reset Bitmap Task") {
  }

  void work(uint worker_id) {
    ShenandoahHeapRegion* region = _regions.next();
    ShenandoahHeap* heap = ShenandoahHeap::heap();
    ShenandoahMarkingContext* const ctx = heap->complete_marking_context();
    while (region != NULL) {
      if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) {
        ctx->clear_bitmap(region);
      }
      region = _regions.next();
    }
  }
};

void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) {
  GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer);
  ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects);

  ShenandoahHeap* heap = ShenandoahHeap::heap();

  // Compact regular objects first
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular);
    ShenandoahCompactObjectsTask compact_task(worker_slices);
    heap->workers()->run_task(&compact_task);
  }

  // Compact humongous objects after regular object moves
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong);
    compact_humongous_objects();
  }

  // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer
  // and must ensure the bitmap is in sync.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete);
    ShenandoahMCResetCompleteBitmapTask task;
    heap->workers()->run_task(&task);
  }

  // Bring regions into proper states after the collection, and set heap properties.
  {
    ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild);

    ShenandoahPostCompactClosure post_compact;
    heap->heap_region_iterate(&post_compact);
    heap->set_used(post_compact.get_live());

    heap->collection_set()->clear();
    heap->free_set()->rebuild();
  }

  heap->clear_cancelled_gc();
}