1 /* 2 * Copyright (c) 2014, 2019, Red Hat, Inc. All rights reserved. 3 * 4 * This code is free software; you can redistribute it and/or modify it 5 * under the terms of the GNU General Public License version 2 only, as 6 * published by the Free Software Foundation. 7 * 8 * This code is distributed in the hope that it will be useful, but WITHOUT 9 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 10 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License 11 * version 2 for more details (a copy is included in the LICENSE file that 12 * accompanied this code). 13 * 14 * You should have received a copy of the GNU General Public License version 15 * 2 along with this work; if not, write to the Free Software Foundation, 16 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. 17 * 18 * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA 19 * or visit www.oracle.com if you need additional information or have any 20 * questions. 21 * 22 */ 23 24 #include "precompiled.hpp" 25 26 #include "code/codeCache.hpp" 27 #include "gc/shared/gcTraceTime.inline.hpp" 28 #include "gc/shared/preservedMarks.inline.hpp" 29 #include "gc/shenandoah/shenandoahForwarding.inline.hpp" 30 #include "gc/shenandoah/shenandoahConcurrentMark.inline.hpp" 31 #include "gc/shenandoah/shenandoahCollectionSet.hpp" 32 #include "gc/shenandoah/shenandoahFreeSet.hpp" 33 #include "gc/shenandoah/shenandoahPhaseTimings.hpp" 34 #include "gc/shenandoah/shenandoahMarkCompact.hpp" 35 #include "gc/shenandoah/shenandoahHeapRegionSet.hpp" 36 #include "gc/shenandoah/shenandoahHeap.inline.hpp" 37 #include "gc/shenandoah/shenandoahHeuristics.hpp" 38 #include "gc/shenandoah/shenandoahMarkingContext.inline.hpp" 39 #include "gc/shenandoah/shenandoahRootProcessor.inline.hpp" 40 #include "gc/shenandoah/shenandoahTraversalGC.hpp" 41 #include "gc/shenandoah/shenandoahTaskqueue.inline.hpp" 42 #include "gc/shenandoah/shenandoahUtils.hpp" 43 #include "gc/shenandoah/shenandoahVerifier.hpp" 44 #include "gc/shenandoah/shenandoahVMOperations.hpp" 45 #include "gc/shenandoah/shenandoahWorkerPolicy.hpp" 46 #include "memory/metaspace.hpp" 47 #include "memory/universe.hpp" 48 #include "oops/compressedOops.inline.hpp" 49 #include "oops/oop.inline.hpp" 50 #include "runtime/biasedLocking.hpp" 51 #include "runtime/orderAccess.hpp" 52 #include "runtime/thread.hpp" 53 #include "utilities/copy.hpp" 54 #include "utilities/growableArray.hpp" 55 #include "gc/shared/workgroup.hpp" 56 57 ShenandoahMarkCompact::ShenandoahMarkCompact() : 58 _gc_timer(NULL), 59 _preserved_marks(new PreservedMarksSet(true)) {} 60 61 void ShenandoahMarkCompact::initialize(GCTimer* gc_timer) { 62 _gc_timer = gc_timer; 63 } 64 65 void ShenandoahMarkCompact::do_it(GCCause::Cause gc_cause) { 66 ShenandoahHeap* heap = ShenandoahHeap::heap(); 67 68 if (ShenandoahVerify) { 69 heap->verifier()->verify_before_fullgc(); 70 } 71 72 if (VerifyBeforeGC) { 73 Universe::verify(); 74 } 75 76 heap->set_full_gc_in_progress(true); 77 78 assert(ShenandoahSafepoint::is_at_shenandoah_safepoint(), "must be at a safepoint"); 79 assert(Thread::current()->is_VM_thread(), "Do full GC only while world is stopped"); 80 81 { 82 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps); 83 heap->pre_full_gc_dump(_gc_timer); 84 } 85 86 { 87 ShenandoahGCPhase prepare_phase(ShenandoahPhaseTimings::full_gc_prepare); 88 // Full GC is supposed to recover from any GC state: 89 90 // a0. Remember if we have forwarded objects 91 bool has_forwarded_objects = heap->has_forwarded_objects(); 92 93 // a1. Cancel evacuation, if in progress 94 if (heap->is_evacuation_in_progress()) { 95 heap->set_evacuation_in_progress(false); 96 } 97 assert(!heap->is_evacuation_in_progress(), "sanity"); 98 99 // a2. Cancel update-refs, if in progress 100 if (heap->is_update_refs_in_progress()) { 101 heap->set_update_refs_in_progress(false); 102 } 103 assert(!heap->is_update_refs_in_progress(), "sanity"); 104 105 // a3. Cancel concurrent traversal GC, if in progress 106 if (heap->is_concurrent_traversal_in_progress()) { 107 heap->traversal_gc()->reset(); 108 heap->set_concurrent_traversal_in_progress(false); 109 } 110 111 // b. Cancel concurrent mark, if in progress 112 if (heap->is_concurrent_mark_in_progress()) { 113 heap->concurrent_mark()->cancel(); 114 heap->set_concurrent_mark_in_progress(false); 115 } 116 assert(!heap->is_concurrent_mark_in_progress(), "sanity"); 117 118 // c. Reset the bitmaps for new marking 119 heap->reset_mark_bitmap(); 120 assert(heap->marking_context()->is_bitmap_clear(), "sanity"); 121 assert(!heap->marking_context()->is_complete(), "sanity"); 122 123 // d. Abandon reference discovery and clear all discovered references. 124 ReferenceProcessor* rp = heap->ref_processor(); 125 rp->disable_discovery(); 126 rp->abandon_partial_discovery(); 127 rp->verify_no_references_recorded(); 128 129 // e. Set back forwarded objects bit back, in case some steps above dropped it. 130 heap->set_has_forwarded_objects(has_forwarded_objects); 131 132 // f. Sync pinned region status from the CP marks 133 heap->sync_pinned_region_status(); 134 135 // The rest of prologue: 136 BiasedLocking::preserve_marks(); 137 _preserved_marks->init(heap->workers()->active_workers()); 138 } 139 140 heap->make_parsable(true); 141 142 OrderAccess::fence(); 143 144 phase1_mark_heap(); 145 146 // Once marking is done, which may have fixed up forwarded objects, we can drop it. 147 // Coming out of Full GC, we would not have any forwarded objects. 148 // This also prevents resolves with fwdptr from kicking in while adjusting pointers in phase3. 149 heap->set_has_forwarded_objects(false); 150 151 heap->set_full_gc_move_in_progress(true); 152 153 // Setup workers for the rest 154 OrderAccess::fence(); 155 156 // Initialize worker slices 157 ShenandoahHeapRegionSet** worker_slices = NEW_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, heap->max_workers(), mtGC); 158 for (uint i = 0; i < heap->max_workers(); i++) { 159 worker_slices[i] = new ShenandoahHeapRegionSet(); 160 } 161 162 { 163 // The rest of code performs region moves, where region status is undefined 164 // until all phases run together. 165 ShenandoahHeapLocker lock(heap->lock()); 166 167 phase2_calculate_target_addresses(worker_slices); 168 169 OrderAccess::fence(); 170 171 phase3_update_references(); 172 173 phase4_compact_objects(worker_slices); 174 } 175 176 { 177 // Epilogue 178 SharedRestorePreservedMarksTaskExecutor exec(heap->workers()); 179 _preserved_marks->restore(&exec); 180 BiasedLocking::restore_marks(); 181 _preserved_marks->reclaim(); 182 } 183 184 // Resize metaspace 185 MetaspaceGC::compute_new_size(); 186 187 // Free worker slices 188 for (uint i = 0; i < heap->max_workers(); i++) { 189 delete worker_slices[i]; 190 } 191 FREE_C_HEAP_ARRAY(ShenandoahHeapRegionSet*, worker_slices); 192 193 heap->set_full_gc_move_in_progress(false); 194 heap->set_full_gc_in_progress(false); 195 196 if (ShenandoahVerify) { 197 heap->verifier()->verify_after_fullgc(); 198 } 199 200 if (VerifyAfterGC) { 201 Universe::verify(); 202 } 203 204 { 205 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_heapdumps); 206 heap->post_full_gc_dump(_gc_timer); 207 } 208 } 209 210 class ShenandoahPrepareForMarkClosure: public ShenandoahHeapRegionClosure { 211 private: 212 ShenandoahMarkingContext* const _ctx; 213 214 public: 215 ShenandoahPrepareForMarkClosure() : _ctx(ShenandoahHeap::heap()->marking_context()) {} 216 217 void heap_region_do(ShenandoahHeapRegion *r) { 218 _ctx->capture_top_at_mark_start(r); 219 r->clear_live_data(); 220 r->set_concurrent_iteration_safe_limit(r->top()); 221 } 222 }; 223 224 void ShenandoahMarkCompact::phase1_mark_heap() { 225 GCTraceTime(Info, gc, phases) time("Phase 1: Mark live objects", _gc_timer); 226 ShenandoahGCPhase mark_phase(ShenandoahPhaseTimings::full_gc_mark); 227 228 ShenandoahHeap* heap = ShenandoahHeap::heap(); 229 230 ShenandoahPrepareForMarkClosure cl; 231 heap->heap_region_iterate(&cl); 232 233 ShenandoahConcurrentMark* cm = heap->concurrent_mark(); 234 235 heap->set_process_references(heap->heuristics()->can_process_references()); 236 heap->set_unload_classes(heap->heuristics()->can_unload_classes()); 237 238 ReferenceProcessor* rp = heap->ref_processor(); 239 // enable ("weak") refs discovery 240 rp->enable_discovery(true /*verify_no_refs*/); 241 rp->setup_policy(true); // forcefully purge all soft references 242 rp->set_active_mt_degree(heap->workers()->active_workers()); 243 244 cm->update_roots(ShenandoahPhaseTimings::full_gc_roots); 245 cm->mark_roots(ShenandoahPhaseTimings::full_gc_roots); 246 cm->finish_mark_from_roots(/* full_gc = */ true); 247 heap->mark_complete_marking_context(); 248 heap->parallel_cleaning(true /* full_gc */); 249 } 250 251 class ShenandoahPrepareForCompactionObjectClosure : public ObjectClosure { 252 private: 253 PreservedMarks* const _preserved_marks; 254 ShenandoahHeap* const _heap; 255 GrowableArray<ShenandoahHeapRegion*>& _empty_regions; 256 int _empty_regions_pos; 257 ShenandoahHeapRegion* _to_region; 258 ShenandoahHeapRegion* _from_region; 259 HeapWord* _compact_point; 260 261 public: 262 ShenandoahPrepareForCompactionObjectClosure(PreservedMarks* preserved_marks, 263 GrowableArray<ShenandoahHeapRegion*>& empty_regions, 264 ShenandoahHeapRegion* to_region) : 265 _preserved_marks(preserved_marks), 266 _heap(ShenandoahHeap::heap()), 267 _empty_regions(empty_regions), 268 _empty_regions_pos(0), 269 _to_region(to_region), 270 _from_region(NULL), 271 _compact_point(to_region->bottom()) {} 272 273 void set_from_region(ShenandoahHeapRegion* from_region) { 274 _from_region = from_region; 275 } 276 277 void finish_region() { 278 assert(_to_region != NULL, "should not happen"); 279 _to_region->set_new_top(_compact_point); 280 } 281 282 bool is_compact_same_region() { 283 return _from_region == _to_region; 284 } 285 286 int empty_regions_pos() { 287 return _empty_regions_pos; 288 } 289 290 void do_object(oop p) { 291 assert(_from_region != NULL, "must set before work"); 292 assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); 293 assert(!_heap->complete_marking_context()->allocated_after_mark_start((HeapWord*) p), "must be truly marked"); 294 295 size_t obj_size = p->size(); 296 if (_compact_point + obj_size > _to_region->end()) { 297 finish_region(); 298 299 // Object doesn't fit. Pick next empty region and start compacting there. 300 ShenandoahHeapRegion* new_to_region; 301 if (_empty_regions_pos < _empty_regions.length()) { 302 new_to_region = _empty_regions.at(_empty_regions_pos); 303 _empty_regions_pos++; 304 } else { 305 // Out of empty region? Compact within the same region. 306 new_to_region = _from_region; 307 } 308 309 assert(new_to_region != _to_region, "must not reuse same to-region"); 310 assert(new_to_region != NULL, "must not be NULL"); 311 _to_region = new_to_region; 312 _compact_point = _to_region->bottom(); 313 } 314 315 // Object fits into current region, record new location: 316 assert(_compact_point + obj_size <= _to_region->end(), "must fit"); 317 shenandoah_assert_not_forwarded(NULL, p); 318 _preserved_marks->push_if_necessary(p, p->mark_raw()); 319 p->forward_to(oop(_compact_point)); 320 _compact_point += obj_size; 321 } 322 }; 323 324 class ShenandoahPrepareForCompactionTask : public AbstractGangTask { 325 private: 326 PreservedMarksSet* const _preserved_marks; 327 ShenandoahHeap* const _heap; 328 ShenandoahHeapRegionSet** const _worker_slices; 329 ShenandoahRegionIterator _heap_regions; 330 331 ShenandoahHeapRegion* next_from_region(ShenandoahHeapRegionSet* slice) { 332 ShenandoahHeapRegion* from_region = _heap_regions.next(); 333 334 // Look for next candidate for this slice: 335 while (from_region != NULL) { 336 // Empty region: get it into the slice to defragment the slice itself. 337 // We could have skipped this without violating correctness, but we really 338 // want to compact all live regions to the start of the heap, which sometimes 339 // means moving them into the fully empty regions. 340 if (from_region->is_empty()) break; 341 342 // Can move the region, and this is not the humongous region. Humongous 343 // moves are special cased here, because their moves are handled separately. 344 if (from_region->is_stw_move_allowed() && !from_region->is_humongous()) break; 345 346 from_region = _heap_regions.next(); 347 } 348 349 if (from_region != NULL) { 350 assert(slice != NULL, "sanity"); 351 assert(!from_region->is_humongous(), "this path cannot handle humongous regions"); 352 assert(from_region->is_empty() || from_region->is_stw_move_allowed(), "only regions that can be moved in mark-compact"); 353 slice->add_region(from_region); 354 } 355 356 return from_region; 357 } 358 359 public: 360 ShenandoahPrepareForCompactionTask(PreservedMarksSet* preserved_marks, ShenandoahHeapRegionSet** worker_slices) : 361 AbstractGangTask("Shenandoah Prepare For Compaction Task"), 362 _preserved_marks(preserved_marks), 363 _heap(ShenandoahHeap::heap()), _worker_slices(worker_slices) { 364 } 365 366 void work(uint worker_id) { 367 ShenandoahHeapRegionSet* slice = _worker_slices[worker_id]; 368 ShenandoahHeapRegion* from_region = next_from_region(slice); 369 // No work? 370 if (from_region == NULL) { 371 return; 372 } 373 374 // Sliding compaction. Walk all regions in the slice, and compact them. 375 // Remember empty regions and reuse them as needed. 376 ResourceMark rm; 377 GrowableArray<ShenandoahHeapRegion*> empty_regions((int)_heap->num_regions()); 378 ShenandoahPrepareForCompactionObjectClosure cl(_preserved_marks->get(worker_id), empty_regions, from_region); 379 while (from_region != NULL) { 380 cl.set_from_region(from_region); 381 if (from_region->has_live()) { 382 _heap->marked_object_iterate(from_region, &cl); 383 } 384 385 // Compacted the region to somewhere else? From-region is empty then. 386 if (!cl.is_compact_same_region()) { 387 empty_regions.append(from_region); 388 } 389 from_region = next_from_region(slice); 390 } 391 cl.finish_region(); 392 393 // Mark all remaining regions as empty 394 for (int pos = cl.empty_regions_pos(); pos < empty_regions.length(); ++pos) { 395 ShenandoahHeapRegion* r = empty_regions.at(pos); 396 r->set_new_top(r->bottom()); 397 } 398 } 399 }; 400 401 void ShenandoahMarkCompact::calculate_target_humongous_objects() { 402 ShenandoahHeap* heap = ShenandoahHeap::heap(); 403 404 // Compute the new addresses for humongous objects. We need to do this after addresses 405 // for regular objects are calculated, and we know what regions in heap suffix are 406 // available for humongous moves. 407 // 408 // Scan the heap backwards, because we are compacting humongous regions towards the end. 409 // Maintain the contiguous compaction window in [to_begin; to_end), so that we can slide 410 // humongous start there. 411 // 412 // The complication is potential non-movable regions during the scan. If such region is 413 // detected, then sliding restarts towards that non-movable region. 414 415 size_t to_begin = heap->num_regions(); 416 size_t to_end = heap->num_regions(); 417 418 for (size_t c = heap->num_regions(); c > 0; c--) { 419 ShenandoahHeapRegion *r = heap->get_region(c - 1); 420 if (r->is_humongous_continuation() || (r->new_top() == r->bottom())) { 421 // To-region candidate: record this, and continue scan 422 to_begin = r->region_number(); 423 continue; 424 } 425 426 if (r->is_humongous_start() && r->is_stw_move_allowed()) { 427 // From-region candidate: movable humongous region 428 oop old_obj = oop(r->bottom()); 429 size_t words_size = old_obj->size(); 430 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize); 431 432 size_t start = to_end - num_regions; 433 434 if (start >= to_begin && start != r->region_number()) { 435 // Fits into current window, and the move is non-trivial. Record the move then, and continue scan. 436 _preserved_marks->get(0)->push_if_necessary(old_obj, old_obj->mark_raw()); 437 old_obj->forward_to(oop(heap->get_region(start)->bottom())); 438 to_end = start; 439 continue; 440 } 441 } 442 443 // Failed to fit. Scan starting from current region. 444 to_begin = r->region_number(); 445 to_end = r->region_number(); 446 } 447 } 448 449 class ShenandoahEnsureHeapActiveClosure: public ShenandoahHeapRegionClosure { 450 private: 451 ShenandoahHeap* const _heap; 452 453 public: 454 ShenandoahEnsureHeapActiveClosure() : _heap(ShenandoahHeap::heap()) {} 455 void heap_region_do(ShenandoahHeapRegion* r) { 456 if (r->is_trash()) { 457 r->recycle(); 458 } 459 if (r->is_cset()) { 460 r->make_regular_bypass(); 461 } 462 if (r->is_empty_uncommitted()) { 463 r->make_committed_bypass(); 464 } 465 assert (r->is_committed(), "only committed regions in heap now, see region " SIZE_FORMAT, r->region_number()); 466 467 // Record current region occupancy: this communicates empty regions are free 468 // to the rest of Full GC code. 469 r->set_new_top(r->top()); 470 } 471 }; 472 473 class ShenandoahTrashImmediateGarbageClosure: public ShenandoahHeapRegionClosure { 474 private: 475 ShenandoahHeap* const _heap; 476 ShenandoahMarkingContext* const _ctx; 477 478 public: 479 ShenandoahTrashImmediateGarbageClosure() : 480 _heap(ShenandoahHeap::heap()), 481 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {} 482 483 void heap_region_do(ShenandoahHeapRegion* r) { 484 if (r->is_humongous_start()) { 485 oop humongous_obj = oop(r->bottom()); 486 if (!_ctx->is_marked(humongous_obj)) { 487 assert(!r->has_live(), 488 "Region " SIZE_FORMAT " is not marked, should not have live", r->region_number()); 489 _heap->trash_humongous_region_at(r); 490 } else { 491 assert(r->has_live(), 492 "Region " SIZE_FORMAT " should have live", r->region_number()); 493 } 494 } else if (r->is_humongous_continuation()) { 495 // If we hit continuation, the non-live humongous starts should have been trashed already 496 assert(r->humongous_start_region()->has_live(), 497 "Region " SIZE_FORMAT " should have live", r->region_number()); 498 } else if (r->is_regular()) { 499 if (!r->has_live()) { 500 r->make_trash_immediate(); 501 } 502 } 503 } 504 }; 505 506 void ShenandoahMarkCompact::phase2_calculate_target_addresses(ShenandoahHeapRegionSet** worker_slices) { 507 GCTraceTime(Info, gc, phases) time("Phase 2: Compute new object addresses", _gc_timer); 508 ShenandoahGCPhase calculate_address_phase(ShenandoahPhaseTimings::full_gc_calculate_addresses); 509 510 ShenandoahHeap* heap = ShenandoahHeap::heap(); 511 512 // About to figure out which regions can be compacted, make sure pinning status 513 // had been updated in GC prologue. 514 heap->assert_pinned_region_status(); 515 516 { 517 // Trash the immediately collectible regions before computing addresses 518 ShenandoahTrashImmediateGarbageClosure tigcl; 519 heap->heap_region_iterate(&tigcl); 520 521 // Make sure regions are in good state: committed, active, clean. 522 // This is needed because we are potentially sliding the data through them. 523 ShenandoahEnsureHeapActiveClosure ecl; 524 heap->heap_region_iterate(&ecl); 525 } 526 527 // Compute the new addresses for regular objects 528 { 529 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_regular); 530 ShenandoahPrepareForCompactionTask prepare_task(_preserved_marks, worker_slices); 531 heap->workers()->run_task(&prepare_task); 532 } 533 534 // Compute the new addresses for humongous objects 535 { 536 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_calculate_addresses_humong); 537 calculate_target_humongous_objects(); 538 } 539 } 540 541 class ShenandoahAdjustPointersClosure : public MetadataVisitingOopIterateClosure { 542 private: 543 ShenandoahHeap* const _heap; 544 ShenandoahMarkingContext* const _ctx; 545 546 template <class T> 547 inline void do_oop_work(T* p) { 548 T o = RawAccess<>::oop_load(p); 549 if (!CompressedOops::is_null(o)) { 550 oop obj = CompressedOops::decode_not_null(o); 551 assert(_ctx->is_marked(obj), "must be marked"); 552 if (obj->is_forwarded()) { 553 oop forw = obj->forwardee(); 554 RawAccess<IS_NOT_NULL>::oop_store(p, forw); 555 } 556 } 557 } 558 559 public: 560 ShenandoahAdjustPointersClosure() : 561 _heap(ShenandoahHeap::heap()), 562 _ctx(ShenandoahHeap::heap()->complete_marking_context()) {} 563 564 void do_oop(oop* p) { do_oop_work(p); } 565 void do_oop(narrowOop* p) { do_oop_work(p); } 566 }; 567 568 class ShenandoahAdjustPointersObjectClosure : public ObjectClosure { 569 private: 570 ShenandoahHeap* const _heap; 571 ShenandoahAdjustPointersClosure _cl; 572 573 public: 574 ShenandoahAdjustPointersObjectClosure() : 575 _heap(ShenandoahHeap::heap()) { 576 } 577 void do_object(oop p) { 578 assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); 579 p->oop_iterate(&_cl); 580 } 581 }; 582 583 class ShenandoahAdjustPointersTask : public AbstractGangTask { 584 private: 585 ShenandoahHeap* const _heap; 586 ShenandoahRegionIterator _regions; 587 588 public: 589 ShenandoahAdjustPointersTask() : 590 AbstractGangTask("Shenandoah Adjust Pointers Task"), 591 _heap(ShenandoahHeap::heap()) { 592 } 593 594 void work(uint worker_id) { 595 ShenandoahAdjustPointersObjectClosure obj_cl; 596 ShenandoahHeapRegion* r = _regions.next(); 597 while (r != NULL) { 598 if (!r->is_humongous_continuation() && r->has_live()) { 599 _heap->marked_object_iterate(r, &obj_cl); 600 } 601 r = _regions.next(); 602 } 603 } 604 }; 605 606 class ShenandoahAdjustRootPointersTask : public AbstractGangTask { 607 private: 608 ShenandoahRootAdjuster* _rp; 609 PreservedMarksSet* _preserved_marks; 610 public: 611 ShenandoahAdjustRootPointersTask(ShenandoahRootAdjuster* rp, PreservedMarksSet* preserved_marks) : 612 AbstractGangTask("Shenandoah Adjust Root Pointers Task"), 613 _rp(rp), 614 _preserved_marks(preserved_marks) {} 615 616 void work(uint worker_id) { 617 ShenandoahAdjustPointersClosure cl; 618 _rp->roots_do(worker_id, &cl); 619 _preserved_marks->get(worker_id)->adjust_during_full_gc(); 620 } 621 }; 622 623 void ShenandoahMarkCompact::phase3_update_references() { 624 GCTraceTime(Info, gc, phases) time("Phase 3: Adjust pointers", _gc_timer); 625 ShenandoahGCPhase adjust_pointer_phase(ShenandoahPhaseTimings::full_gc_adjust_pointers); 626 627 ShenandoahHeap* heap = ShenandoahHeap::heap(); 628 629 WorkGang* workers = heap->workers(); 630 uint nworkers = workers->active_workers(); 631 { 632 #if COMPILER2_OR_JVMCI 633 DerivedPointerTable::clear(); 634 #endif 635 ShenandoahRootAdjuster rp(nworkers, ShenandoahPhaseTimings::full_gc_roots); 636 ShenandoahAdjustRootPointersTask task(&rp, _preserved_marks); 637 workers->run_task(&task); 638 #if COMPILER2_OR_JVMCI 639 DerivedPointerTable::update_pointers(); 640 #endif 641 } 642 643 ShenandoahAdjustPointersTask adjust_pointers_task; 644 workers->run_task(&adjust_pointers_task); 645 } 646 647 class ShenandoahCompactObjectsClosure : public ObjectClosure { 648 private: 649 ShenandoahHeap* const _heap; 650 uint const _worker_id; 651 652 public: 653 ShenandoahCompactObjectsClosure(uint worker_id) : 654 _heap(ShenandoahHeap::heap()), _worker_id(worker_id) {} 655 656 void do_object(oop p) { 657 assert(_heap->complete_marking_context()->is_marked(p), "must be marked"); 658 size_t size = (size_t)p->size(); 659 if (p->is_forwarded()) { 660 HeapWord* compact_from = (HeapWord*) p; 661 HeapWord* compact_to = (HeapWord*) p->forwardee(); 662 Copy::aligned_conjoint_words(compact_from, compact_to, size); 663 oop new_obj = oop(compact_to); 664 new_obj->init_mark_raw(); 665 } 666 } 667 }; 668 669 class ShenandoahCompactObjectsTask : public AbstractGangTask { 670 private: 671 ShenandoahHeap* const _heap; 672 ShenandoahHeapRegionSet** const _worker_slices; 673 674 public: 675 ShenandoahCompactObjectsTask(ShenandoahHeapRegionSet** worker_slices) : 676 AbstractGangTask("Shenandoah Compact Objects Task"), 677 _heap(ShenandoahHeap::heap()), 678 _worker_slices(worker_slices) { 679 } 680 681 void work(uint worker_id) { 682 ShenandoahHeapRegionSetIterator slice(_worker_slices[worker_id]); 683 684 ShenandoahCompactObjectsClosure cl(worker_id); 685 ShenandoahHeapRegion* r = slice.next(); 686 while (r != NULL) { 687 assert(!r->is_humongous(), "must not get humongous regions here"); 688 if (r->has_live()) { 689 _heap->marked_object_iterate(r, &cl); 690 } 691 r->set_top(r->new_top()); 692 r = slice.next(); 693 } 694 } 695 }; 696 697 class ShenandoahPostCompactClosure : public ShenandoahHeapRegionClosure { 698 private: 699 ShenandoahHeap* const _heap; 700 size_t _live; 701 702 public: 703 ShenandoahPostCompactClosure() : _heap(ShenandoahHeap::heap()), _live(0) { 704 _heap->free_set()->clear(); 705 } 706 707 void heap_region_do(ShenandoahHeapRegion* r) { 708 assert (!r->is_cset(), "cset regions should have been demoted already"); 709 710 // Need to reset the complete-top-at-mark-start pointer here because 711 // the complete marking bitmap is no longer valid. This ensures 712 // size-based iteration in marked_object_iterate(). 713 // NOTE: See blurb at ShenandoahMCResetCompleteBitmapTask on why we need to skip 714 // pinned regions. 715 if (!r->is_pinned()) { 716 _heap->complete_marking_context()->reset_top_at_mark_start(r); 717 } 718 719 size_t live = r->used(); 720 721 // Make empty regions that have been allocated into regular 722 if (r->is_empty() && live > 0) { 723 r->make_regular_bypass(); 724 } 725 726 // Reclaim regular regions that became empty 727 if (r->is_regular() && live == 0) { 728 r->make_trash(); 729 } 730 731 // Recycle all trash regions 732 if (r->is_trash()) { 733 live = 0; 734 r->recycle(); 735 } 736 737 r->set_live_data(live); 738 r->reset_alloc_metadata_to_shared(); 739 _live += live; 740 } 741 742 size_t get_live() { 743 return _live; 744 } 745 }; 746 747 void ShenandoahMarkCompact::compact_humongous_objects() { 748 // Compact humongous regions, based on their fwdptr objects. 749 // 750 // This code is serial, because doing the in-slice parallel sliding is tricky. In most cases, 751 // humongous regions are already compacted, and do not require further moves, which alleviates 752 // sliding costs. We may consider doing this in parallel in future. 753 754 ShenandoahHeap* heap = ShenandoahHeap::heap(); 755 756 for (size_t c = heap->num_regions(); c > 0; c--) { 757 ShenandoahHeapRegion* r = heap->get_region(c - 1); 758 if (r->is_humongous_start()) { 759 oop old_obj = oop(r->bottom()); 760 if (!old_obj->is_forwarded()) { 761 // No need to move the object, it stays at the same slot 762 continue; 763 } 764 size_t words_size = old_obj->size(); 765 size_t num_regions = ShenandoahHeapRegion::required_regions(words_size * HeapWordSize); 766 767 size_t old_start = r->region_number(); 768 size_t old_end = old_start + num_regions - 1; 769 size_t new_start = heap->heap_region_index_containing(old_obj->forwardee()); 770 size_t new_end = new_start + num_regions - 1; 771 assert(old_start != new_start, "must be real move"); 772 assert(r->is_stw_move_allowed(), "Region " SIZE_FORMAT " should be movable", r->region_number()); 773 774 Copy::aligned_conjoint_words(heap->get_region(old_start)->bottom(), 775 heap->get_region(new_start)->bottom(), 776 ShenandoahHeapRegion::region_size_words()*num_regions); 777 778 oop new_obj = oop(heap->get_region(new_start)->bottom()); 779 new_obj->init_mark_raw(); 780 781 { 782 for (size_t c = old_start; c <= old_end; c++) { 783 ShenandoahHeapRegion* r = heap->get_region(c); 784 r->make_regular_bypass(); 785 r->set_top(r->bottom()); 786 } 787 788 for (size_t c = new_start; c <= new_end; c++) { 789 ShenandoahHeapRegion* r = heap->get_region(c); 790 if (c == new_start) { 791 r->make_humongous_start_bypass(); 792 } else { 793 r->make_humongous_cont_bypass(); 794 } 795 796 // Trailing region may be non-full, record the remainder there 797 size_t remainder = words_size & ShenandoahHeapRegion::region_size_words_mask(); 798 if ((c == new_end) && (remainder != 0)) { 799 r->set_top(r->bottom() + remainder); 800 } else { 801 r->set_top(r->end()); 802 } 803 804 r->reset_alloc_metadata_to_shared(); 805 } 806 } 807 } 808 } 809 } 810 811 // This is slightly different to ShHeap::reset_next_mark_bitmap: 812 // we need to remain able to walk pinned regions. 813 // Since pinned region do not move and don't get compacted, we will get holes with 814 // unreachable objects in them (which may have pointers to unloaded Klasses and thus 815 // cannot be iterated over using oop->size(). The only way to safely iterate over those is using 816 // a valid marking bitmap and valid TAMS pointer. This class only resets marking 817 // bitmaps for un-pinned regions, and later we only reset TAMS for unpinned regions. 818 class ShenandoahMCResetCompleteBitmapTask : public AbstractGangTask { 819 private: 820 ShenandoahRegionIterator _regions; 821 822 public: 823 ShenandoahMCResetCompleteBitmapTask() : 824 AbstractGangTask("Parallel Reset Bitmap Task") { 825 } 826 827 void work(uint worker_id) { 828 ShenandoahHeapRegion* region = _regions.next(); 829 ShenandoahHeap* heap = ShenandoahHeap::heap(); 830 ShenandoahMarkingContext* const ctx = heap->complete_marking_context(); 831 while (region != NULL) { 832 if (heap->is_bitmap_slice_committed(region) && !region->is_pinned() && region->has_live()) { 833 ctx->clear_bitmap(region); 834 } 835 region = _regions.next(); 836 } 837 } 838 }; 839 840 void ShenandoahMarkCompact::phase4_compact_objects(ShenandoahHeapRegionSet** worker_slices) { 841 GCTraceTime(Info, gc, phases) time("Phase 4: Move objects", _gc_timer); 842 ShenandoahGCPhase compaction_phase(ShenandoahPhaseTimings::full_gc_copy_objects); 843 844 ShenandoahHeap* heap = ShenandoahHeap::heap(); 845 846 // Compact regular objects first 847 { 848 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_regular); 849 ShenandoahCompactObjectsTask compact_task(worker_slices); 850 heap->workers()->run_task(&compact_task); 851 } 852 853 // Compact humongous objects after regular object moves 854 { 855 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_humong); 856 compact_humongous_objects(); 857 } 858 859 // Reset complete bitmap. We're about to reset the complete-top-at-mark-start pointer 860 // and must ensure the bitmap is in sync. 861 { 862 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_reset_complete); 863 ShenandoahMCResetCompleteBitmapTask task; 864 heap->workers()->run_task(&task); 865 } 866 867 // Bring regions in proper states after the collection, and set heap properties. 868 { 869 ShenandoahGCPhase phase(ShenandoahPhaseTimings::full_gc_copy_objects_rebuild); 870 871 ShenandoahPostCompactClosure post_compact; 872 heap->heap_region_iterate(&post_compact); 873 heap->set_used(post_compact.get_live()); 874 875 heap->collection_set()->clear(); 876 heap->free_set()->rebuild(); 877 } 878 879 heap->clear_cancelled_gc(); 880 }